| hash | date | author | commit_message | is_merge | git_diff | type | masked_commit_message |
|---|---|---|---|---|---|---|---|
2df8143ad5ee2b52d5b568fbf789416d2d9703aa
|
2023-02-22 14:26:20
|
Lei, HUANG
|
feat: support table ttl (#1052)
| false
|
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index f3f1ea6ae54b..060c7e17399a 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -17,7 +17,9 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use common_telemetry::info;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
+use datanode::datanode::{
+ CompactionConfig, Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig,
+};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -77,6 +79,7 @@ pub struct StandaloneOptions {
pub mode: Mode,
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
+ pub compaction: CompactionConfig,
pub enable_memory_catalog: bool,
}
@@ -94,6 +97,7 @@ impl Default for StandaloneOptions {
mode: Mode::Standalone,
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
+ compaction: CompactionConfig::default(),
enable_memory_catalog: false,
}
}
@@ -120,6 +124,7 @@ impl StandaloneOptions {
wal: self.wal,
storage: self.storage,
enable_memory_catalog: self.enable_memory_catalog,
+ compaction: self.compaction,
..Default::default()
}
}
diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs
index b99af9680386..c6603ef80fa9 100644
--- a/src/common/time/src/error.rs
+++ b/src/common/time/src/error.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use std::any::Any;
+use std::num::TryFromIntError;
use chrono::ParseError;
use common_error::ext::ErrorExt;
@@ -24,8 +25,18 @@ use snafu::{Backtrace, ErrorCompat, Snafu};
pub enum Error {
#[snafu(display("Failed to parse string to date, raw: {}, source: {}", raw, source))]
ParseDateStr { raw: String, source: ParseError },
+
#[snafu(display("Failed to parse a string into Timestamp, raw string: {}", raw))]
ParseTimestamp { raw: String, backtrace: Backtrace },
+
+ #[snafu(display("Current timestamp overflow, source: {}", source))]
+ TimestampOverflow {
+ source: TryFromIntError,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Timestamp arithmetic overflow, msg: {}", msg))]
+ ArithmeticOverflow { msg: String, backtrace: Backtrace },
}
impl ErrorExt for Error {
@@ -34,6 +45,8 @@ impl ErrorExt for Error {
Error::ParseDateStr { .. } | Error::ParseTimestamp { .. } => {
StatusCode::InvalidArguments
}
+ Error::TimestampOverflow { .. } => StatusCode::Internal,
+ Error::ArithmeticOverflow { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index e34f35e6efec..6eaf1f0e3270 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -17,12 +17,15 @@ use std::cmp::Ordering;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
+use std::time::Duration;
use chrono::offset::Local;
use chrono::{DateTime, LocalResult, NaiveDateTime, TimeZone, Utc};
use serde::{Deserialize, Serialize};
+use snafu::{OptionExt, ResultExt};
-use crate::error::{Error, ParseTimestampSnafu};
+use crate::error;
+use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
pub struct Timestamp {
@@ -31,6 +34,50 @@ pub struct Timestamp {
}
impl Timestamp {
+ /// Creates current timestamp in millisecond.
+ pub fn current_millis() -> Self {
+ Self {
+ value: crate::util::current_time_millis(),
+ unit: TimeUnit::Millisecond,
+ }
+ }
+
+ /// Subtracts a duration from timestamp.
+ /// # Note
+ /// The result time unit remains unchanged even if `duration` has a different unit with `self`.
+ /// For example, a timestamp with value 1 and time unit second, subtracted by 1 millisecond
+ /// and the result is still 1 second.
+ pub fn sub(&self, duration: Duration) -> error::Result<Self> {
+ let duration: i64 = match self.unit {
+ TimeUnit::Second => {
+ i64::try_from(duration.as_secs()).context(TimestampOverflowSnafu)?
+ }
+ TimeUnit::Millisecond => {
+ i64::try_from(duration.as_millis()).context(TimestampOverflowSnafu)?
+ }
+ TimeUnit::Microsecond => {
+ i64::try_from(duration.as_micros()).context(TimestampOverflowSnafu)?
+ }
+ TimeUnit::Nanosecond => {
+ i64::try_from(duration.as_nanos()).context(TimestampOverflowSnafu)?
+ }
+ };
+
+ let value = self
+ .value
+ .checked_sub(duration)
+ .with_context(|| ArithmeticOverflowSnafu {
+ msg: format!(
+ "Try to subtract timestamp: {:?} with duration: {:?}",
+ self, duration
+ ),
+ })?;
+ Ok(Timestamp {
+ value,
+ unit: self.unit,
+ })
+ }
+
pub fn new(value: i64, unit: TimeUnit) -> Self {
Self { unit, value }
}
@@ -77,11 +124,11 @@ impl Timestamp {
pub fn convert_to(&self, unit: TimeUnit) -> Option<Timestamp> {
if self.unit().factor() >= unit.factor() {
let mul = self.unit().factor() / unit.factor();
- let value = self.value.checked_mul(mul)?;
+ let value = self.value.checked_mul(mul as i64)?;
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
- Some(Timestamp::new(self.value.div_euclid(mul), unit))
+ Some(Timestamp::new(self.value.div_euclid(mul as i64), unit))
}
}
@@ -92,23 +139,25 @@ impl Timestamp {
pub fn convert_to_ceil(&self, unit: TimeUnit) -> Option<Timestamp> {
if self.unit().factor() >= unit.factor() {
let mul = self.unit().factor() / unit.factor();
- let value = self.value.checked_mul(mul)?;
+ let value = self.value.checked_mul(mul as i64)?;
Some(Timestamp::new(value, unit))
} else {
let mul = unit.factor() / self.unit().factor();
- Some(Timestamp::new(self.value.div_ceil(mul), unit))
+ Some(Timestamp::new(self.value.div_ceil(mul as i64), unit))
}
}
/// Split a [Timestamp] into seconds part and nanoseconds part.
/// Notice the seconds part of split result is always rounded down to floor.
- fn split(&self) -> (i64, i64) {
- let sec_mul = TimeUnit::Second.factor() / self.unit.factor();
- let nsec_mul = self.unit.factor() / TimeUnit::Nanosecond.factor();
+ fn split(&self) -> (i64, u32) {
+ let sec_mul = (TimeUnit::Second.factor() / self.unit.factor()) as i64;
+ let nsec_mul = (self.unit.factor() / TimeUnit::Nanosecond.factor()) as i64;
let sec_div = self.value.div_euclid(sec_mul);
let sec_mod = self.value.rem_euclid(sec_mul);
- (sec_div, sec_mod * nsec_mul)
+ // safety: the max possible value of `sec_mod` is 999,999,999
+ let nsec = u32::try_from(sec_mod * nsec_mul).unwrap();
+ (sec_div, nsec)
}
/// Format timestamp to ISO8601 string. If the timestamp exceeds what chrono timestamp can
@@ -122,15 +171,8 @@ impl Timestamp {
}
pub fn to_chrono_datetime(&self) -> LocalResult<DateTime<Utc>> {
- let nano_factor = TimeUnit::Second.factor() / TimeUnit::Nanosecond.factor();
- let (mut secs, mut nsecs) = self.split();
-
- if nsecs < 0 {
- secs -= 1;
- nsecs += nano_factor;
- }
-
- Utc.timestamp_opt(secs, nsecs as u32)
+ let (sec, nsec) = self.split();
+ Utc.timestamp_opt(sec, nsec)
}
}
@@ -252,7 +294,7 @@ impl Display for TimeUnit {
}
impl TimeUnit {
- pub fn factor(&self) -> i64 {
+ pub fn factor(&self) -> u32 {
match self {
TimeUnit::Second => 1_000_000_000,
TimeUnit::Millisecond => 1_000_000,
@@ -300,7 +342,7 @@ impl Hash for Timestamp {
fn hash<H: Hasher>(&self, state: &mut H) {
let (sec, nsec) = self.split();
state.write_i64(sec);
- state.write_i64(nsec);
+ state.write_u32(nsec);
}
}
@@ -789,4 +831,41 @@ mod tests {
Timestamp::new(1, TimeUnit::Second).convert_to_ceil(TimeUnit::Millisecond)
);
}
+
+ #[test]
+ fn test_split_overflow() {
+ Timestamp::new(i64::MAX, TimeUnit::Second).split();
+ Timestamp::new(i64::MIN, TimeUnit::Second).split();
+ Timestamp::new(i64::MAX, TimeUnit::Millisecond).split();
+ Timestamp::new(i64::MIN, TimeUnit::Millisecond).split();
+ Timestamp::new(i64::MAX, TimeUnit::Microsecond).split();
+ Timestamp::new(i64::MIN, TimeUnit::Microsecond).split();
+ Timestamp::new(i64::MAX, TimeUnit::Nanosecond).split();
+ Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
+ let (sec, nsec) = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
+ let time = NaiveDateTime::from_timestamp_opt(sec, nsec).unwrap();
+ assert_eq!(sec, time.timestamp());
+ assert_eq!(nsec, time.timestamp_subsec_nanos());
+ }
+
+ #[test]
+ fn test_timestamp_sub() {
+ let res = Timestamp::new(1, TimeUnit::Second)
+ .sub(Duration::from_secs(1))
+ .unwrap();
+ assert_eq!(0, res.value);
+ assert_eq!(TimeUnit::Second, res.unit);
+
+ let res = Timestamp::new(0, TimeUnit::Second)
+ .sub(Duration::from_secs(1))
+ .unwrap();
+ assert_eq!(-1, res.value);
+ assert_eq!(TimeUnit::Second, res.unit);
+
+ let res = Timestamp::new(1, TimeUnit::Second)
+ .sub(Duration::from_millis(1))
+ .unwrap();
+ assert_eq!(1, res.value);
+ assert_eq!(TimeUnit::Second, res.unit);
+ }
}
diff --git a/src/storage/src/compaction/picker.rs b/src/storage/src/compaction/picker.rs
index 5e1a38814d39..966e4ed5af44 100644
--- a/src/storage/src/compaction/picker.rs
+++ b/src/storage/src/compaction/picker.rs
@@ -14,14 +14,20 @@
use std::marker::PhantomData;
use std::sync::Arc;
+use std::time::Duration;
-use common_telemetry::debug;
+use common_telemetry::{debug, error, info};
+use common_time::Timestamp;
+use snafu::ResultExt;
use store_api::logstore::LogStore;
use crate::compaction::scheduler::CompactionRequestImpl;
use crate::compaction::strategy::{SimpleTimeWindowStrategy, StrategyRef};
use crate::compaction::task::{CompactionTask, CompactionTaskImpl};
+use crate::error::TtlCalculationSnafu;
use crate::scheduler::Request;
+use crate::sst::{FileHandle, Level};
+use crate::version::LevelMetasRef;
/// Picker picks input SST files and builds the compaction task.
/// Different compaction strategy may implement different pickers.
@@ -57,6 +63,24 @@ impl<S> SimplePicker<S> {
_phantom_data: Default::default(),
}
}
+
+ fn get_expired_ssts(
+ &self,
+ levels: &LevelMetasRef,
+ ttl: Option<Duration>,
+ ) -> crate::error::Result<Vec<FileHandle>> {
+ let Some(ttl) = ttl else { return Ok(vec![]); };
+
+ let expire_time = Timestamp::current_millis()
+ .sub(ttl)
+ .context(TtlCalculationSnafu)?;
+
+ let mut expired_ssts = vec![];
+ for level in 0..levels.level_num() {
+ expired_ssts.extend(levels.level(level as Level).get_expired_files(&expire_time));
+ }
+ Ok(expired_ssts)
+ }
}
impl<S: LogStore> Picker for SimplePicker<S> {
@@ -69,6 +93,22 @@ impl<S: LogStore> Picker for SimplePicker<S> {
req: &CompactionRequestImpl<S>,
) -> crate::error::Result<Option<CompactionTaskImpl<S>>> {
let levels = &req.levels();
+ let expired_ssts = self
+ .get_expired_ssts(levels, req.ttl)
+ .map_err(|e| {
+ error!(e;"Failed to get region expired SST files, region: {}, ttl: {:?}", req.region_id, req.ttl);
+ e
+ })
+ .unwrap_or_default();
+
+ if !expired_ssts.is_empty() {
+ info!(
+ "Expired SSTs in region {}: {:?}",
+ req.region_id, expired_ssts
+ );
+ // here we mark expired SSTs as compacting to avoid them being picked.
+ expired_ssts.iter().for_each(|f| f.mark_compacting(true));
+ }
for level_num in 0..levels.level_num() {
let level = levels.level(level_num as u8);
@@ -91,6 +131,7 @@ impl<S: LogStore> Picker for SimplePicker<S> {
shared_data: req.shared.clone(),
wal: req.wal.clone(),
manifest: req.manifest.clone(),
+ expired_ssts,
}));
}
diff --git a/src/storage/src/compaction/scheduler.rs b/src/storage/src/compaction/scheduler.rs
index 921e0db360b1..22bf002d18e2 100644
--- a/src/storage/src/compaction/scheduler.rs
+++ b/src/storage/src/compaction/scheduler.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use std::sync::Arc;
+use std::time::Duration;
use common_telemetry::{debug, error, info};
use store_api::logstore::LogStore;
@@ -48,6 +49,7 @@ pub struct CompactionRequestImpl<S: LogStore> {
pub shared: SharedDataRef,
pub manifest: RegionManifest,
pub wal: Wal<S>,
+ pub ttl: Option<Duration>,
}
impl<S: LogStore> CompactionRequestImpl<S> {
diff --git a/src/storage/src/compaction/task.rs b/src/storage/src/compaction/task.rs
index 501c0a3314d2..3cee9ea04977 100644
--- a/src/storage/src/compaction/task.rs
+++ b/src/storage/src/compaction/task.rs
@@ -42,6 +42,7 @@ pub struct CompactionTaskImpl<S: LogStore> {
pub shared_data: SharedDataRef,
pub wal: Wal<S>,
pub manifest: RegionManifest,
+ pub expired_ssts: Vec<FileHandle>,
}
impl<S: LogStore> Debug for CompactionTaskImpl<S> {
@@ -60,19 +61,14 @@ impl<S: LogStore> Drop for CompactionTaskImpl<S> {
impl<S: LogStore> CompactionTaskImpl<S> {
/// Compacts inputs SSTs, returns `(output file, compacted input file)`.
- async fn merge_ssts(&mut self) -> Result<(Vec<FileMeta>, Vec<FileMeta>)> {
+ async fn merge_ssts(&mut self) -> Result<(HashSet<FileMeta>, HashSet<FileMeta>)> {
let mut futs = Vec::with_capacity(self.outputs.len());
let mut compacted_inputs = HashSet::new();
let region_id = self.shared_data.id();
for output in self.outputs.drain(..) {
let schema = self.schema.clone();
let sst_layer = self.sst_layer.clone();
- compacted_inputs.extend(output.inputs.iter().map(|f| FileMeta {
- region_id,
- file_name: f.file_name().to_string(),
- time_range: *f.time_range(),
- level: f.level(),
- }));
+ compacted_inputs.extend(output.inputs.iter().map(FileHandle::meta));
// TODO(hl): Maybe spawn to runtime to exploit in-job parallelism.
futs.push(async move {
@@ -94,8 +90,8 @@ impl<S: LogStore> CompactionTaskImpl<S> {
/// Writes updated SST info into manifest.
async fn write_manifest_and_apply(
&self,
- output: Vec<FileMeta>,
- input: Vec<FileMeta>,
+ output: HashSet<FileMeta>,
+ input: HashSet<FileMeta>,
) -> Result<()> {
let version = &self.shared_data.version_control;
let region_version = version.metadata().version();
@@ -103,8 +99,8 @@ impl<S: LogStore> CompactionTaskImpl<S> {
let edit = RegionEdit {
region_version,
flushed_sequence: None,
- files_to_add: output,
- files_to_remove: input,
+ files_to_add: Vec::from_iter(output.into_iter()),
+ files_to_remove: Vec::from_iter(input.into_iter()),
};
info!(
"Compacted region: {}, region edit: {:?}",
@@ -131,10 +127,11 @@ impl<S: LogStore> CompactionTask for CompactionTaskImpl<S> {
async fn run(mut self) -> Result<()> {
self.mark_files_compacting(true);
- let (output, compacted) = self.merge_ssts().await.map_err(|e| {
+ let (output, mut compacted) = self.merge_ssts().await.map_err(|e| {
error!(e; "Failed to compact region: {}", self.shared_data.name());
e
})?;
+ compacted.extend(self.expired_ssts.iter().map(FileHandle::meta));
self.write_manifest_and_apply(output, compacted)
.await
.map_err(|e| {
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index 6edfbc833698..b11beed5b27c 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -14,6 +14,7 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
+use std::time::Duration;
use async_trait::async_trait;
use common_telemetry::logging::info;
@@ -289,7 +290,8 @@ impl<S: LogStore> EngineInner<S> {
let mut guard = SlotGuard::new(name, &self.regions);
- let store_config = self.region_store_config(&opts.parent_dir, opts.write_buffer_size, name);
+ let store_config =
+ self.region_store_config(&opts.parent_dir, opts.write_buffer_size, name, opts.ttl);
let region = match RegionImpl::open(name.to_string(), store_config, opts).await? {
None => return Ok(None),
@@ -319,8 +321,12 @@ impl<S: LogStore> EngineInner<S> {
.context(error::InvalidRegionDescSnafu {
region: &region_name,
})?;
- let store_config =
- self.region_store_config(&opts.parent_dir, opts.write_buffer_size, &region_name);
+ let store_config = self.region_store_config(
+ &opts.parent_dir,
+ opts.write_buffer_size,
+ &region_name,
+ opts.ttl,
+ );
let region = RegionImpl::create(metadata, store_config).await?;
@@ -341,6 +347,7 @@ impl<S: LogStore> EngineInner<S> {
parent_dir: &str,
write_buffer_size: Option<usize>,
region_name: &str,
+ ttl: Option<Duration>,
) -> StoreConfig<S> {
let parent_dir = util::normalize_dir(parent_dir);
@@ -363,6 +370,7 @@ impl<S: LogStore> EngineInner<S> {
compaction_scheduler: self.compaction_scheduler.clone(),
engine_config: self.config.clone(),
file_purger: self.file_purger.clone(),
+ ttl,
}
}
}
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 339711a4c642..e08ce9a2d09f 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -434,6 +434,12 @@ pub enum Error {
source: object_store::Error,
backtrace: Backtrace,
},
+
+ #[snafu(display("Failed to calculate SST expire time, source: {}", source))]
+ TtlCalculation {
+ #[snafu(backtrace)]
+ source: common_time::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -508,6 +514,7 @@ impl ErrorExt for Error {
StopScheduler { .. } => StatusCode::Internal,
DeleteSst { .. } => StatusCode::StorageUnavailable,
IllegalSchedulerState { .. } => StatusCode::Unexpected,
+ TtlCalculation { source, .. } => source.status_code(),
}
}
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 77d0531d045f..d91fad2de0c6 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -19,6 +19,7 @@ mod writer;
use std::collections::BTreeMap;
use std::fmt;
use std::sync::Arc;
+use std::time::Duration;
use async_trait::async_trait;
use common_telemetry::logging;
@@ -140,6 +141,7 @@ pub struct StoreConfig<S: LogStore> {
pub compaction_scheduler: CompactionSchedulerRef<S>,
pub engine_config: Arc<EngineConfig>,
pub file_purger: FilePurgerRef,
+ pub ttl: Option<Duration>,
}
pub type RecoverdMetadata = (SequenceNumber, (ManifestVersion, RawRegionMetadata));
@@ -198,6 +200,7 @@ impl<S: LogStore> RegionImpl<S> {
writer: Arc::new(RegionWriter::new(
store_config.memtable_builder,
store_config.engine_config.clone(),
+ store_config.ttl,
)),
wal,
flush_strategy: store_config.flush_strategy,
@@ -277,6 +280,7 @@ impl<S: LogStore> RegionImpl<S> {
let writer = Arc::new(RegionWriter::new(
store_config.memtable_builder,
store_config.engine_config.clone(),
+ store_config.ttl,
));
let writer_ctx = WriterContext {
shared: &shared,
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index 9a4d2a574c98..56af329c040f 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use std::sync::Arc;
+use std::time::Duration;
use common_error::prelude::BoxedError;
use common_telemetry::tracing::log::info;
@@ -60,9 +61,13 @@ pub struct RegionWriter {
}
impl RegionWriter {
- pub fn new(memtable_builder: MemtableBuilderRef, config: Arc<EngineConfig>) -> RegionWriter {
+ pub fn new(
+ memtable_builder: MemtableBuilderRef,
+ config: Arc<EngineConfig>,
+ ttl: Option<Duration>,
+ ) -> RegionWriter {
RegionWriter {
- inner: Mutex::new(WriterInner::new(memtable_builder, config)),
+ inner: Mutex::new(WriterInner::new(memtable_builder, config, ttl)),
version_mutex: Mutex::new(()),
}
}
@@ -324,15 +329,21 @@ struct WriterInner {
/// It should protected by upper mutex
closed: bool,
engine_config: Arc<EngineConfig>,
+ ttl: Option<Duration>,
}
impl WriterInner {
- fn new(memtable_builder: MemtableBuilderRef, engine_config: Arc<EngineConfig>) -> WriterInner {
+ fn new(
+ memtable_builder: MemtableBuilderRef,
+ engine_config: Arc<EngineConfig>,
+ ttl: Option<Duration>,
+ ) -> WriterInner {
WriterInner {
memtable_builder,
flush_handle: None,
engine_config,
closed: false,
+ ttl,
}
}
@@ -596,7 +607,7 @@ impl WriterInner {
return Ok(());
}
- let cb = Self::build_flush_callback(&current_version, ctx, &self.engine_config);
+ let cb = Self::build_flush_callback(&current_version, ctx, &self.engine_config, self.ttl);
let flush_req = FlushJob {
max_memtable_id: max_memtable_id.unwrap(),
@@ -624,6 +635,7 @@ impl WriterInner {
version: &VersionRef,
ctx: &WriterContext<S>,
config: &Arc<EngineConfig>,
+ ttl: Option<Duration>,
) -> Option<FlushCallback> {
let region_id = version.metadata().id();
let compaction_request = CompactionRequestImpl {
@@ -633,6 +645,7 @@ impl WriterInner {
shared: ctx.shared.clone(),
manifest: ctx.manifest.clone(),
wal: ctx.wal.clone(),
+ ttl,
};
let compaction_scheduler = ctx.compaction_scheduler.clone();
let shared_data = ctx.shared.clone();
diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs
index 25edf74e9318..f7ebdc1af256 100644
--- a/src/storage/src/sst.rs
+++ b/src/storage/src/sst.rs
@@ -145,6 +145,21 @@ impl LevelMeta {
self.files.len()
}
+ /// Returns expired SSTs from current level.
+ pub fn get_expired_files(&self, expire_time: &Timestamp) -> Vec<FileHandle> {
+ self.files
+ .iter()
+ .filter_map(|(_, v)| {
+ let Some((_, end)) = v.time_range() else { return None; };
+ if end < expire_time {
+ Some(v.clone())
+ } else {
+ None
+ }
+ })
+ .collect()
+ }
+
pub fn files(&self) -> impl Iterator<Item = &FileHandle> {
self.files.values()
}
@@ -212,6 +227,11 @@ impl FileHandle {
pub fn mark_deleted(&self) {
self.inner.deleted.store(true, Ordering::Relaxed);
}
+
+ #[inline]
+ pub fn meta(&self) -> FileMeta {
+ self.inner.meta.clone()
+ }
}
/// Actually data of [FileHandle].
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 261b7d360623..31459cecee95 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -69,5 +69,6 @@ pub async fn new_store_config(
compaction_scheduler,
engine_config: Default::default(),
file_purger,
+ ttl: None,
}
}
|
feat
|
support table ttl (#1052)
|
c6f024a171b2c75d88812a9113c415590d8fdf00
|
2023-04-18 11:38:19
|
Near
|
feat: Add metrics for cache hit/miss for object store cache (#1405)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 72da8b122da5..491fe43e6511 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5333,6 +5333,7 @@ dependencies = [
"common-test-util",
"futures",
"lru 0.9.0",
+ "metrics",
"opendal",
"pin-project",
"tokio",
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index d38a93e85a21..79cf2ac5bd76 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -9,6 +9,7 @@ lru = "0.9"
async-trait = "0.1"
bytes = "1.4"
futures = { version = "0.3" }
+metrics = "0.20"
opendal = { version = "0.30", features = ["layers-tracing", "layers-metrics"] }
pin-project = "1.0"
tokio.workspace = true
diff --git a/src/object-store/src/cache_policy.rs b/src/object-store/src/cache_policy.rs
index 8965ebd53cab..cb08a26d99da 100644
--- a/src/object-store/src/cache_policy.rs
+++ b/src/object-store/src/cache_policy.rs
@@ -19,12 +19,18 @@ use std::sync::Arc;
use async_trait::async_trait;
use bytes::Bytes;
use lru::LruCache;
+use metrics::increment_counter;
use opendal::ops::{OpDelete, OpList, OpRead, OpScan, OpWrite};
use opendal::raw::oio::{Read, Reader, Write};
use opendal::raw::{Accessor, Layer, LayeredAccessor, RpDelete, RpList, RpRead, RpScan, RpWrite};
use opendal::{ErrorKind, Result};
use tokio::sync::Mutex;
+use crate::metrics::{
+ OBJECT_STORE_LRU_CACHE_ERROR, OBJECT_STORE_LRU_CACHE_ERROR_KIND, OBJECT_STORE_LRU_CACHE_HIT,
+ OBJECT_STORE_LRU_CACHE_MISS,
+};
+
pub struct LruCacheLayer<C> {
cache: Arc<C>,
lru_cache: Arc<Mutex<LruCache<String, ()>>>,
@@ -89,12 +95,16 @@ impl<I: Accessor, C: Accessor> LayeredAccessor for LruCacheAccessor<I, C> {
match self.cache.read(&cache_path, OpRead::default()).await {
Ok((rp, r)) => {
+ increment_counter!(OBJECT_STORE_LRU_CACHE_HIT);
+
// update lru when cache hit
let mut lru_cache = lru_cache.lock().await;
lru_cache.get_or_insert(cache_path.clone(), || ());
Ok(to_output_reader((rp, r)))
}
Err(err) if err.kind() == ErrorKind::NotFound => {
+ increment_counter!(OBJECT_STORE_LRU_CACHE_MISS);
+
let (rp, mut reader) = self.inner.read(&path, args.clone()).await?;
let size = rp.clone().into_metadata().content_length();
let (_, mut writer) = self.cache.write(&cache_path, OpWrite::new()).await?;
@@ -122,7 +132,10 @@ impl<I: Accessor, C: Accessor> LayeredAccessor for LruCacheAccessor<I, C> {
Err(_) => return self.inner.read(&path, args).await.map(to_output_reader),
}
}
- Err(_) => return self.inner.read(&path, args).await.map(to_output_reader),
+ Err(err) => {
+ increment_counter!(OBJECT_STORE_LRU_CACHE_ERROR, OBJECT_STORE_LRU_CACHE_ERROR_KIND => format!("{}", err.kind()));
+ return self.inner.read(&path, args).await.map(to_output_reader);
+ }
}
}
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 7cfbb712117f..5e74aa0cec69 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -20,5 +20,6 @@ pub use opendal::{
};
pub mod cache_policy;
+mod metrics;
pub mod test_util;
pub mod util;
diff --git a/src/object-store/src/metrics.rs b/src/object-store/src/metrics.rs
new file mode 100644
index 000000000000..6a8f0ee18994
--- /dev/null
+++ b/src/object-store/src/metrics.rs
@@ -0,0 +1,20 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! object-store metrics
+
+pub const OBJECT_STORE_LRU_CACHE_HIT: &str = "object_store.lru_cache.hit";
+pub const OBJECT_STORE_LRU_CACHE_MISS: &str = "object_store.lru_cache.miss";
+pub const OBJECT_STORE_LRU_CACHE_ERROR: &str = "object_store.lru_cache.error";
+pub const OBJECT_STORE_LRU_CACHE_ERROR_KIND: &str = "error";
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 820973996f5b..4efc2ac72c3b 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -16,7 +16,7 @@ use std::env;
use std::sync::Arc;
use anyhow::Result;
-use common_telemetry::logging;
+use common_telemetry::{logging, metric};
use common_test_util::temp_dir::create_temp_dir;
use object_store::cache_policy::LruCacheLayer;
use object_store::services::{Fs, S3};
@@ -185,6 +185,7 @@ async fn assert_cache_files(
#[tokio::test]
async fn test_object_store_cache_policy() -> Result<()> {
common_telemetry::init_default_ut_logging();
+ common_telemetry::init_default_metrics_recorder();
// create file storage
let root_dir = create_temp_dir("test_fs_backend");
let store = OperatorBuilder::new(
@@ -258,5 +259,11 @@ async fn test_object_store_cache_policy() -> Result<()> {
)
.await?;
+ let handle = metric::try_handle().unwrap();
+ let metric_text = handle.render();
+
+ assert!(metric_text.contains("object_store_lru_cache_hit"));
+ assert!(metric_text.contains("object_store_lru_cache_miss"));
+
Ok(())
}
|
feat
|
Add metrics for cache hit/miss for object store cache (#1405)
|
14267c2aed30cc7c546baddac66ee999c97a3a35
|
2024-03-29 12:07:25
|
Eugene Tolbakov
|
feat(tql): add initial support for start,stop,step as sql functions (#3507)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index ce314d70b51e..8f62994542d4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9377,6 +9377,7 @@ name = "sql"
version = "0.7.1"
dependencies = [
"api",
+ "chrono",
"common-base",
"common-catalog",
"common-datasource",
@@ -9385,6 +9386,10 @@ dependencies = [
"common-macro",
"common-query",
"common-time",
+ "datafusion",
+ "datafusion-common",
+ "datafusion-expr",
+ "datafusion-physical-expr",
"datafusion-sql",
"datatypes",
"hex",
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index 1c360bfbd20d..ce08faf9dcfa 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -9,6 +9,7 @@ workspace = true
[dependencies]
api.workspace = true
+chrono.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-decimal.workspace = true
@@ -16,6 +17,10 @@ common-error.workspace = true
common-macro.workspace = true
common-query.workspace = true
common-time.workspace = true
+datafusion.workspace = true
+datafusion-common.workspace = true
+datafusion-expr.workspace = true
+datafusion-physical-expr.workspace = true
datafusion-sql.workspace = true
datatypes.workspace = true
hex = "0.4"
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 24643b4a6505..208dd6915f68 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -24,11 +24,12 @@ use snafu::{Location, Snafu};
use sqlparser::parser::ParserError;
use crate::ast::{Expr, Value as SqlValue};
+use crate::parsers::error::TQLError;
pub type Result<T> = std::result::Result<T, Error>;
/// SQL parser errors.
-// Now the error in parser does not contains backtrace to avoid generating backtrace
+// Now the error in parser does not contain backtrace to avoid generating backtrace
// every time the parser parses an invalid SQL.
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -66,6 +67,14 @@ pub enum Error {
location: Location,
},
+ // Syntax error from tql parser.
+ #[snafu(display(""))]
+ TQLSyntax {
+ #[snafu(source)]
+ error: TQLError,
+ location: Location,
+ },
+
#[snafu(display("Missing time index constraint"))]
MissingTimeIndex {},
@@ -170,6 +179,7 @@ impl ErrorExt for Error {
UnsupportedDefaultValue { .. } | Unsupported { .. } => StatusCode::Unsupported,
Unexpected { .. }
| Syntax { .. }
+ | TQLSyntax { .. }
| MissingTimeIndex { .. }
| InvalidTimeIndex { .. }
| InvalidSql { .. }
diff --git a/src/sql/src/parsers.rs b/src/sql/src/parsers.rs
index ca249cf64024..b7e5c8c44e84 100644
--- a/src/sql/src/parsers.rs
+++ b/src/sql/src/parsers.rs
@@ -18,6 +18,7 @@ pub(crate) mod create_parser;
pub(crate) mod delete_parser;
pub(crate) mod describe_parser;
pub(crate) mod drop_parser;
+pub(crate) mod error;
pub(crate) mod explain_parser;
pub(crate) mod insert_parser;
pub(crate) mod query_parser;
diff --git a/src/sql/src/parsers/error.rs b/src/sql/src/parsers/error.rs
new file mode 100644
index 000000000000..bdb469ac2bae
--- /dev/null
+++ b/src/sql/src/parsers/error.rs
@@ -0,0 +1,48 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_macro::stack_trace_debug;
+use datafusion_common::DataFusionError;
+use snafu::{Location, Snafu};
+use sqlparser::parser::ParserError;
+
+/// TQL parser & evaluation errors.
+#[derive(Snafu)]
+#[snafu(visibility(pub))]
+#[stack_trace_debug]
+pub enum TQLError {
+ #[snafu(display("Failed to parse TQL expression"))]
+ Parser {
+ #[snafu(source)]
+ error: ParserError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to convert to logical TQL expression"))]
+ ConvertToLogicalExpression {
+ #[snafu(source)]
+ error: DataFusionError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to simplify TQL expression"))]
+ Simplification {
+ #[snafu(source)]
+ error: DataFusionError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to evaluate TQL expression: {}", msg))]
+ Evaluation { msg: String },
+}
diff --git a/src/sql/src/parsers/tql_parser.rs b/src/sql/src/parsers/tql_parser.rs
index 843c54b47b2b..a681ca10012c 100644
--- a/src/sql/src/parsers/tql_parser.rs
+++ b/src/sql/src/parsers/tql_parser.rs
@@ -12,7 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use snafu::ResultExt;
+use std::sync::Arc;
+
+use chrono::Utc;
+use datafusion::optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext};
+use datafusion_common::config::ConfigOptions;
+use datafusion_common::{DFSchema, Result as DFResult, ScalarValue, TableReference};
+use datafusion_expr::{AggregateUDF, Expr, ScalarUDF, TableSource, WindowUDF};
+use datafusion_physical_expr::execution_props::ExecutionProps;
+use datafusion_sql::planner::{ContextProvider, SqlToRel};
+use snafu::{OptionExt, ResultExt};
use sqlparser::keywords::Keyword;
use sqlparser::parser::ParserError;
use sqlparser::tokenizer::Token;
@@ -20,16 +29,21 @@ use sqlparser::tokenizer::Token;
use crate::error::{self, Result};
use crate::parser::ParserContext;
use crate::statements::statement::Statement;
-use crate::statements::tql::{Tql, TqlAnalyze, TqlEval, TqlExplain};
+use crate::statements::tql::{Tql, TqlAnalyze, TqlEval, TqlExplain, TqlParameters};
pub const TQL: &str = "TQL";
const EVAL: &str = "EVAL";
const EVALUATE: &str = "EVALUATE";
-const EXPLAIN: &str = "EXPLAIN";
const VERBOSE: &str = "VERBOSE";
+use datatypes::arrow::datatypes::DataType;
use sqlparser::parser::Parser;
+use crate::dialect::GreptimeDbDialect;
+use crate::parsers::error::{
+ ConvertToLogicalExpressionSnafu, EvaluationSnafu, ParserSnafu, SimplificationSnafu, TQLError,
+};
+
/// TQL extension parser, including:
/// - `TQL EVAL <query>`
/// - `TQL EXPLAIN [VERBOSE] <query>`
@@ -41,23 +55,41 @@ impl<'a> ParserContext<'a> {
match self.parser.peek_token().token {
Token::Word(w) => {
let uppercase = w.value.to_uppercase();
+ let _consume_tql_keyword_token = self.parser.next_token();
match w.keyword {
Keyword::NoKeyword
if (uppercase == EVAL || uppercase == EVALUATE)
&& w.quote_style.is_none() =>
{
- let _ = self.parser.next_token();
- self.parse_tql_eval().context(error::SyntaxSnafu)
+ self.parse_tql_params()
+ .map(|params| Statement::Tql(Tql::Eval(TqlEval::from(params))))
+ .context(error::TQLSyntaxSnafu)
}
Keyword::EXPLAIN => {
- let _ = self.parser.next_token();
- self.parse_tql_explain()
+ let is_verbose = self.has_verbose_keyword();
+ if is_verbose {
+ let _consume_verbose_token = self.parser.next_token();
+ }
+ self.parse_tql_params()
+ .map(|mut params| {
+ params.is_verbose = is_verbose;
+ Statement::Tql(Tql::Explain(TqlExplain::from(params)))
+ })
+ .context(error::TQLSyntaxSnafu)
}
Keyword::ANALYZE => {
- let _ = self.parser.next_token();
- self.parse_tql_analyze().context(error::SyntaxSnafu)
+ let is_verbose = self.has_verbose_keyword();
+ if is_verbose {
+ let _consume_verbose_token = self.parser.next_token();
+ }
+ self.parse_tql_params()
+ .map(|mut params| {
+ params.is_verbose = is_verbose;
+ Statement::Tql(Tql::Analyze(TqlAnalyze::from(params)))
+ })
+ .context(error::TQLSyntaxSnafu)
}
_ => self.unsupported(self.peek_token_as_string()),
}
@@ -66,121 +98,196 @@ impl<'a> ParserContext<'a> {
}
}
- fn parse_tql_eval(&mut self) -> std::result::Result<Statement, ParserError> {
+ fn parse_tql_params(&mut self) -> std::result::Result<TqlParameters, TQLError> {
let parser = &mut self.parser;
- parser.expect_token(&Token::LParen)?;
- let start = Self::parse_string_or_number(parser, Token::Comma)?;
- let end = Self::parse_string_or_number(parser, Token::Comma)?;
- let step = Self::parse_string_or_number(parser, Token::RParen)?;
- let query = Self::parse_tql_query(parser, self.sql, ")")?;
-
- Ok(Statement::Tql(Tql::Eval(TqlEval {
- start,
- end,
- step,
- query,
- })))
+ let (start, end, step, lookback) = match parser.peek_token().token {
+ Token::LParen => {
+ let _consume_lparen_token = parser.next_token();
+ let start = Self::parse_string_or_number_or_word(parser, Token::Comma)?;
+ let end = Self::parse_string_or_number_or_word(parser, Token::Comma)?;
+ let delimiter_token = Self::find_next_delimiter_token(parser);
+ let (step, lookback) = if Self::is_comma(&delimiter_token) {
+ let step = Self::parse_string_or_number_or_word(parser, Token::Comma)?;
+ let lookback = Self::parse_string_or_number_or_word(parser, Token::RParen).ok();
+ (step, lookback)
+ } else {
+ let step = Self::parse_string_or_number_or_word(parser, Token::RParen)?;
+ (step, None)
+ };
+ (start, end, step, lookback)
+ }
+ _ => ("0".to_string(), "0".to_string(), "5m".to_string(), None),
+ };
+ let query = Self::parse_tql_query(parser, self.sql).context(ParserSnafu)?;
+ Ok(TqlParameters::new(start, end, step, lookback, query))
}
- fn parse_string_or_number(
+ fn find_next_delimiter_token(parser: &mut Parser) -> Token {
+ let mut n: usize = 0;
+ while !(Self::is_comma(&parser.peek_nth_token(n).token)
+ || Self::is_rparen(&parser.peek_nth_token(n).token))
+ {
+ n += 1;
+ }
+ parser.peek_nth_token(n).token
+ }
+
+ pub fn is_delimiter_token(token: &Token, delimiter_token: &Token) -> bool {
+ match token {
+ Token::Comma => Self::is_comma(delimiter_token),
+ Token::RParen => Self::is_rparen(delimiter_token),
+ _ => false,
+ }
+ }
+
+ #[inline]
+ fn is_comma(token: &Token) -> bool {
+ matches!(token, Token::Comma)
+ }
+
+ #[inline]
+ fn is_rparen(token: &Token) -> bool {
+ matches!(token, Token::RParen)
+ }
+
+ fn has_verbose_keyword(&mut self) -> bool {
+ self.peek_token_as_string().eq_ignore_ascii_case(VERBOSE)
+ }
+
+ fn parse_string_or_number_or_word(
parser: &mut Parser,
- token: Token,
- ) -> std::result::Result<String, ParserError> {
- let value = match parser.next_token().token {
- Token::Number(n, _) => n,
- Token::DoubleQuotedString(s) | Token::SingleQuotedString(s) => s,
- unexpected => {
- return Err(ParserError::ParserError(format!(
- "Expect number or string, but is {unexpected:?}"
- )));
+ delimiter_token: Token,
+ ) -> std::result::Result<String, TQLError> {
+ let mut tokens = vec![];
+
+ while !Self::is_delimiter_token(&parser.peek_token().token, &delimiter_token) {
+ let token = parser.next_token();
+ tokens.push(token.token);
+ }
+ let result = match tokens.len() {
+ 0 => Err(ParserError::ParserError(
+ "Expected at least one token".to_string(),
+ ))
+ .context(ParserSnafu),
+ 1 => {
+ let value = match tokens[0].clone() {
+ Token::Number(n, _) => n,
+ Token::DoubleQuotedString(s) | Token::SingleQuotedString(s) => s,
+ Token::Word(_) => Self::parse_tokens(tokens)?,
+ unexpected => {
+ return Err(ParserError::ParserError(format!(
+ "Expected number, string or word, but have {unexpected:?}"
+ )))
+ .context(ParserSnafu);
+ }
+ };
+ Ok(value)
}
+ _ => Self::parse_tokens(tokens),
};
- parser.expect_token(&token)?;
+ parser.expect_token(&delimiter_token).context(ParserSnafu)?;
+ result
+ }
- Ok(value)
+ fn parse_tokens(tokens: Vec<Token>) -> std::result::Result<String, TQLError> {
+ Self::parse_to_expr(tokens)
+ .and_then(Self::parse_to_logical_expr)
+ .and_then(Self::simplify_expr)
+ .and_then(Self::evaluate_expr)
}
- fn parse_tql_query(
- parser: &mut Parser,
- sql: &str,
- delimiter: &str,
- ) -> std::result::Result<String, ParserError> {
- let index = sql.to_uppercase().find(delimiter);
+ fn parse_to_expr(tokens: Vec<Token>) -> std::result::Result<sqlparser::ast::Expr, TQLError> {
+ Parser::new(&GreptimeDbDialect {})
+ .with_tokens(tokens)
+ .parse_expr()
+ .context(ParserSnafu)
+ }
- if let Some(index) = index {
- let index = index + delimiter.len() + 1;
- if index >= sql.len() {
- return Err(ParserError::ParserError("empty TQL query".to_string()));
- }
+ fn parse_to_logical_expr(expr: sqlparser::ast::Expr) -> std::result::Result<Expr, TQLError> {
+ let empty_df_schema = DFSchema::empty();
+ SqlToRel::new(&StubContextProvider {})
+ .sql_to_expr(expr.into(), &empty_df_schema, &mut Default::default())
+ .context(ConvertToLogicalExpressionSnafu)
+ }
- let query = &sql[index..];
+ fn simplify_expr(logical_expr: Expr) -> std::result::Result<Expr, TQLError> {
+ let empty_df_schema = DFSchema::empty();
+ let execution_props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
+ let info = SimplifyContext::new(&execution_props).with_schema(Arc::new(empty_df_schema));
+ ExprSimplifier::new(info)
+ .simplify(logical_expr)
+ .context(SimplificationSnafu)
+ }
- while parser.next_token() != Token::EOF {
- // consume all tokens
- // TODO(dennis): supports multi TQL statements separated by ';'?
+ fn evaluate_expr(simplified_expr: Expr) -> std::result::Result<String, TQLError> {
+ match simplified_expr {
+ Expr::Literal(ScalarValue::TimestampNanosecond(ts_nanos, _))
+ | Expr::Literal(ScalarValue::DurationNanosecond(ts_nanos)) => {
+ ts_nanos.map(|v| v / 1_000_000_000)
}
+ Expr::Literal(ScalarValue::TimestampMicrosecond(ts_micros, _))
+ | Expr::Literal(ScalarValue::DurationMicrosecond(ts_micros)) => {
+ ts_micros.map(|v| v / 1_000_000)
+ }
+ Expr::Literal(ScalarValue::TimestampMillisecond(ts_millis, _))
+ | Expr::Literal(ScalarValue::DurationMillisecond(ts_millis)) => {
+ ts_millis.map(|v| v / 1_000)
+ }
+ Expr::Literal(ScalarValue::TimestampSecond(ts_secs, _))
+ | Expr::Literal(ScalarValue::DurationSecond(ts_secs)) => ts_secs,
+ _ => None,
+ }
+ .map(|ts| ts.to_string())
+ .context(EvaluationSnafu {
+ msg: format!("Failed to extract a timestamp value {simplified_expr:?}"),
+ })
+ }
- // remove the last ';' or tailing space if exists
- Ok(query.trim().trim_end_matches(';').to_string())
- } else {
- Err(ParserError::ParserError(format!("{delimiter} not found",)))
+ fn parse_tql_query(parser: &mut Parser, sql: &str) -> std::result::Result<String, ParserError> {
+ while matches!(parser.peek_token().token, Token::Comma) {
+ let _skip_token = parser.next_token();
}
+ let index = parser.next_token().location.column as usize;
+ if index == 0 {
+ return Err(ParserError::ParserError("empty TQL query".to_string()));
+ }
+
+ let query = &sql[index - 1..];
+ while parser.next_token() != Token::EOF {
+ // consume all tokens
+ // TODO(dennis): supports multi TQL statements separated by ';'?
+ }
+ // remove the last ';' or tailing space if exists
+ Ok(query.trim().trim_end_matches(';').to_string())
}
+}
- fn parse_tql_explain(&mut self) -> Result<Statement> {
- let parser = &mut self.parser;
- let is_verbose = if parser.peek_token().token.to_string() == VERBOSE {
- let _ = parser.next_token();
- true
- } else {
- false
- };
- let delimiter = match parser.expect_token(&Token::LParen) {
- Ok(_) => ")",
- Err(_) => {
- if is_verbose {
- VERBOSE
- } else {
- EXPLAIN
- }
- }
- };
- let start = Self::parse_string_or_number(parser, Token::Comma).unwrap_or("0".to_string());
- let end = Self::parse_string_or_number(parser, Token::Comma).unwrap_or("0".to_string());
- let step = Self::parse_string_or_number(parser, Token::RParen).unwrap_or("5m".to_string());
- let query =
- Self::parse_tql_query(parser, self.sql, delimiter).context(error::SyntaxSnafu)?;
-
- Ok(Statement::Tql(Tql::Explain(TqlExplain {
- query,
- start,
- end,
- step,
- is_verbose,
- })))
+#[derive(Default)]
+struct StubContextProvider {}
+
+impl ContextProvider for StubContextProvider {
+ fn get_table_provider(&self, _name: TableReference) -> DFResult<Arc<dyn TableSource>> {
+ unimplemented!()
}
- fn parse_tql_analyze(&mut self) -> std::result::Result<Statement, ParserError> {
- let parser = &mut self.parser;
- let is_verbose = if parser.peek_token().token.to_string() == VERBOSE {
- let _ = parser.next_token();
- true
- } else {
- false
- };
+ fn get_function_meta(&self, _name: &str) -> Option<Arc<ScalarUDF>> {
+ None
+ }
+
+ fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> {
+ unimplemented!()
+ }
+
+ fn get_window_meta(&self, _name: &str) -> Option<Arc<WindowUDF>> {
+ unimplemented!()
+ }
+
+ fn get_variable_type(&self, _variable_names: &[String]) -> Option<DataType> {
+ unimplemented!()
+ }
- parser.expect_token(&Token::LParen)?;
- let start = Self::parse_string_or_number(parser, Token::Comma)?;
- let end = Self::parse_string_or_number(parser, Token::Comma)?;
- let step = Self::parse_string_or_number(parser, Token::RParen)?;
- let query = Self::parse_tql_query(parser, self.sql, ")")?;
- Ok(Statement::Tql(Tql::Analyze(TqlAnalyze {
- start,
- end,
- step,
- query,
- is_verbose,
- })))
+ fn options(&self) -> &ConfigOptions {
+ unimplemented!()
}
}
@@ -191,67 +298,82 @@ mod tests {
use super::*;
use crate::dialect::GreptimeDbDialect;
use crate::parser::ParseOptions;
- #[test]
- fn test_parse_tql_eval() {
- let sql = "TQL EVAL (1676887657, 1676887659, '1m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ fn parse_into_statement(sql: &str) -> Statement {
let mut result =
ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
.unwrap();
assert_eq!(1, result.len());
+ result.remove(0)
+ }
- let statement = result.remove(0);
+ #[test]
+ fn test_parse_tql_eval_with_functions() {
+ let sql = "TQL EVAL (now() - now(), now() - (now() - '10 seconds'::interval), '1s') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ let statement = parse_into_statement(sql);
match statement {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "0");
+ assert_eq!(eval.end, "10");
+ assert_eq!(eval.step, "1s");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL EVAL ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, '1m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "300");
+ assert_eq!(eval.end, "1200");
+ assert_eq!(eval.step, "1m");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[test]
+ fn test_parse_tql_eval() {
+ let sql = "TQL EVAL (1676887657, 1676887659, '1m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Eval(eval)) => {
assert_eq!(eval.start, "1676887657");
assert_eq!(eval.end, "1676887659");
assert_eq!(eval.step, "1m");
+ assert_eq!(eval.lookback, None);
assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
}
_ => unreachable!(),
}
let sql = "TQL EVAL (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ let statement = parse_into_statement(sql);
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
-
- let statement = result.remove(0);
- match statement.clone() {
+ match &statement {
Statement::Tql(Tql::Eval(eval)) => {
assert_eq!(eval.start, "1676887657.1");
assert_eq!(eval.end, "1676887659.5");
assert_eq!(eval.step, "30.3");
+ assert_eq!(eval.lookback, None);
assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
}
_ => unreachable!(),
}
- let sql = "TQL EVALUATE (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
-
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
-
- let statement2 = result.remove(0);
+ let sql2 = "TQL EVALUATE (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ let statement2 = parse_into_statement(sql2);
assert_eq!(statement, statement2);
let sql = "tql eval ('2015-07-01T20:10:30.781Z', '2015-07-01T20:11:00.781Z', '30s') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
-
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
-
- let statement = result.remove(0);
- match statement {
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Eval(eval)) => {
assert_eq!(eval.start, "2015-07-01T20:10:30.781Z");
assert_eq!(eval.end, "2015-07-01T20:11:00.781Z");
assert_eq!(eval.step, "30s");
+ assert_eq!(eval.lookback, None);
assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
}
_ => unreachable!(),
@@ -259,78 +381,172 @@ mod tests {
}
#[test]
- fn test_parse_tql_explain() {
- let sql = "TQL EXPLAIN http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ fn test_parse_tql_with_lookback_values() {
+ let sql = "TQL EVAL (1676887657, 1676887659, '1m', '5m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "1676887657");
+ assert_eq!(eval.end, "1676887659");
+ assert_eq!(eval.step, "1m".to_string());
+ assert_eq!(eval.lookback, Some("5m".to_string()));
+ assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ }
+ _ => unreachable!(),
+ }
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
+ let sql = "TQL EVAL ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, '1m', '7m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "300");
+ assert_eq!(eval.end, "1200");
+ assert_eq!(eval.step, "1m");
+ assert_eq!(eval.lookback, Some("7m".to_string()));
+ assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ }
+ _ => unreachable!(),
+ }
- let statement = result.remove(0);
- match statement {
+ let sql = "TQL EXPLAIN (20, 100, 10, '3m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Explain(explain)) => {
+ assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "20");
+ assert_eq!(explain.end, "100");
+ assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, Some("3m".to_string()));
+ assert!(!explain.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL EXPLAIN VERBOSE (20, 100, 10, '3m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Explain(explain)) => {
+ assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "20");
+ assert_eq!(explain.end, "100");
+ assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, Some("3m".to_string()));
+ assert!(explain.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL ANALYZE (1676887657, 1676887659, '1m', '9m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Analyze(analyze)) => {
+ assert_eq!(analyze.start, "1676887657");
+ assert_eq!(analyze.end, "1676887659");
+ assert_eq!(analyze.step, "1m");
+ assert_eq!(analyze.lookback, Some("9m".to_string()));
+ assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert!(!analyze.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL ANALYZE VERBOSE (1676887657, 1676887659, '1m', '9m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Analyze(analyze)) => {
+ assert_eq!(analyze.start, "1676887657");
+ assert_eq!(analyze.end, "1676887659");
+ assert_eq!(analyze.step, "1m");
+ assert_eq!(analyze.lookback, Some("9m".to_string()));
+ assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert!(analyze.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[test]
+ fn test_parse_tql_explain() {
+ let sql = "TQL EXPLAIN http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Explain(explain)) => {
assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
assert_eq!(explain.start, "0");
assert_eq!(explain.end, "0");
assert_eq!(explain.step, "5m");
+ assert_eq!(explain.lookback, None);
assert!(!explain.is_verbose);
}
_ => unreachable!(),
}
let sql = "TQL EXPLAIN VERBOSE http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
-
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
-
- let statement = result.remove(0);
- match statement {
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Explain(explain)) => {
assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
assert_eq!(explain.start, "0");
assert_eq!(explain.end, "0");
assert_eq!(explain.step, "5m");
+ assert_eq!(explain.lookback, None);
assert!(explain.is_verbose);
}
_ => unreachable!(),
}
let sql = "TQL EXPLAIN (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
-
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
-
- let statement = result.remove(0);
- match statement {
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Explain(explain)) => {
assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
assert_eq!(explain.start, "20");
assert_eq!(explain.end, "100");
assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, None);
assert!(!explain.is_verbose);
}
_ => unreachable!(),
}
- let sql = "TQL EXPLAIN VERBOSE (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ let sql = "TQL EXPLAIN ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, 10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Explain(explain)) => {
+ assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "300");
+ assert_eq!(explain.end, "1200");
+ assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, None);
+ assert!(!explain.is_verbose);
+ }
+ _ => unreachable!(),
+ }
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
+ let sql = "TQL EXPLAIN VERBOSE (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Explain(explain)) => {
+ assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "20");
+ assert_eq!(explain.end, "100");
+ assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, None);
+ assert!(explain.is_verbose);
+ }
+ _ => unreachable!(),
+ }
- let statement = result.remove(0);
- match statement {
+ let sql = "TQL EXPLAIN verbose (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Explain(explain)) => {
assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
assert_eq!(explain.start, "20");
assert_eq!(explain.end, "100");
assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, None);
+ assert!(explain.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL EXPLAIN VERBOSE ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, 10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Explain(explain)) => {
+ assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "300");
+ assert_eq!(explain.end, "1200");
+ assert_eq!(explain.step, "10");
+ assert_eq!(explain.lookback, None);
assert!(explain.is_verbose);
}
_ => unreachable!(),
@@ -340,16 +556,25 @@ mod tests {
#[test]
fn test_parse_tql_analyze() {
let sql = "TQL ANALYZE (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
- let statement = result.remove(0);
- match statement {
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Analyze(analyze)) => {
assert_eq!(analyze.start, "1676887657.1");
assert_eq!(analyze.end, "1676887659.5");
assert_eq!(analyze.step, "30.3");
+ assert_eq!(analyze.lookback, None);
+ assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert!(!analyze.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL ANALYZE ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, 10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Analyze(analyze)) => {
+ assert_eq!(analyze.start, "300");
+ assert_eq!(analyze.end, "1200");
+ assert_eq!(analyze.step, "10");
+ assert_eq!(analyze.lookback, None);
assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
assert!(!analyze.is_verbose);
}
@@ -357,16 +582,38 @@ mod tests {
}
let sql = "TQL ANALYZE VERBOSE (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
- let mut result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap();
- assert_eq!(1, result.len());
- let statement = result.remove(0);
- match statement {
+ match parse_into_statement(sql) {
Statement::Tql(Tql::Analyze(analyze)) => {
assert_eq!(analyze.start, "1676887657.1");
assert_eq!(analyze.end, "1676887659.5");
assert_eq!(analyze.step, "30.3");
+ assert_eq!(analyze.lookback, None);
+ assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert!(analyze.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL ANALYZE verbose (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Analyze(analyze)) => {
+ assert_eq!(analyze.start, "1676887657.1");
+ assert_eq!(analyze.end, "1676887659.5");
+ assert_eq!(analyze.step, "30.3");
+ assert_eq!(analyze.lookback, None);
+ assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert!(analyze.is_verbose);
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL ANALYZE VERBOSE ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, 10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Analyze(analyze)) => {
+ assert_eq!(analyze.start, "300");
+ assert_eq!(analyze.end, "1200");
+ assert_eq!(analyze.step, "10");
+ assert_eq!(analyze.lookback, None);
assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
assert!(analyze.is_verbose);
}
@@ -374,20 +621,95 @@ mod tests {
}
}
+ #[test]
+ fn test_parse_tql_with_various_queries() {
+ // query has whitespaces and comma
+ match parse_into_statement("TQL EVAL (0, 30, '10s') , data + (1 < bool 2);")
+ {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "0");
+ assert_eq!(eval.end, "30");
+ assert_eq!(eval.step, "10s");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "data + (1 < bool 2)");
+ }
+ _ => unreachable!(),
+ }
+ // query starts with a quote
+ match parse_into_statement("TQL EVAL (0, 10, '5s') '1+1';") {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "0");
+ assert_eq!(eval.end, "10");
+ assert_eq!(eval.step, "5s");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "'1+1'");
+ }
+ _ => unreachable!(),
+ }
+
+ // query starts with number
+ match parse_into_statement("TQL EVAL (300, 300, '1s') 10 atan2 20;") {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "300");
+ assert_eq!(eval.end, "300");
+ assert_eq!(eval.step, "1s");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "10 atan2 20");
+ }
+ _ => unreachable!(),
+ }
+
+ // query starts with a bracket
+ let sql = "TQL EVAL (0, 30, '10s') (sum by(host) (irate(host_cpu_seconds_total{mode!='idle'}[1m0s])) / sum by (host)((irate(host_cpu_seconds_total[1m0s])))) * 100;";
+ match parse_into_statement(sql) {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "0");
+ assert_eq!(eval.end, "30");
+ assert_eq!(eval.step, "10s");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "(sum by(host) (irate(host_cpu_seconds_total{mode!='idle'}[1m0s])) / sum by (host)((irate(host_cpu_seconds_total[1m0s])))) * 100");
+ }
+ _ => unreachable!(),
+ }
+
+ // query starts with a curly bracket
+ match parse_into_statement("TQL EVAL (0, 10, '5s') {__name__=\"test\"}") {
+ Statement::Tql(Tql::Eval(eval)) => {
+ assert_eq!(eval.start, "0");
+ assert_eq!(eval.end, "10");
+ assert_eq!(eval.step, "5s");
+ assert_eq!(eval.lookback, None);
+ assert_eq!(eval.query, "{__name__=\"test\"}");
+ }
+ _ => unreachable!(),
+ }
+ }
+
#[test]
fn test_parse_tql_error() {
- // Invalid duration
+ let dialect = &GreptimeDbDialect {};
+ let parse_options = ParseOptions::default();
+
+ // invalid duration
let sql = "TQL EVAL (1676887657, 1676887659, 1m) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
let result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap_err();
- assert!(result.output_msg().contains("Expected ), found: m"));
+ ParserContext::create_with_dialect(sql, dialect, parse_options.clone()).unwrap_err();
+ assert!(result
+ .output_msg()
+ .contains("Failed to extract a timestamp value"));
// missing end
let sql = "TQL EVAL (1676887657, '1m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
let result =
- ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
- .unwrap_err();
- assert!(result.output_msg().contains("Expected ,, found: )"));
+ ParserContext::create_with_dialect(sql, dialect, parse_options.clone()).unwrap_err();
+ assert!(result
+ .output_msg()
+ .contains("Failed to extract a timestamp value"));
+
+ // empty TQL query
+ let sql = "TQL EVAL (0, 30, '10s')";
+ let result =
+ ParserContext::create_with_dialect(sql, dialect, parse_options.clone()).unwrap_err();
+ assert!(result.output_msg().contains("empty TQL query"));
}
}
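The "300" and "1200" expected in the assertions above come from the parser reducing SQL timestamp and interval expressions to Unix-epoch seconds before storing them as TQL start/end strings. A minimal, purely illustrative sketch of that arithmetic (not code from this diff):

    fn epoch_seconds_sketch() {
        // '1970-01-01T00:05:00'::timestamp is 5 minutes past the Unix epoch.
        let start = 5 * 60; // 300
        // '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval is 20 minutes past the epoch.
        let end = 10 * 60 + 10 * 60; // 1200
        assert_eq!((start, end), (300, 1200));
    }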
diff --git a/src/sql/src/statements/tql.rs b/src/sql/src/statements/tql.rs
index 46ca696f82ab..6bc4136068ea 100644
--- a/src/sql/src/statements/tql.rs
+++ b/src/sql/src/statements/tql.rs
@@ -21,30 +21,106 @@ pub enum Tql {
Analyze(TqlAnalyze),
}
+/// TQL EVAL (<start>, <end>, <step>, [lookback]) <promql>
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlEval {
pub start: String,
pub end: String,
pub step: String,
+ pub lookback: Option<String>,
pub query: String,
}
-/// TQL EXPLAIN [VERBOSE] (like SQL EXPLAIN): doesn't execute the query but tells how the query would be executed.
+/// TQL EXPLAIN [VERBOSE] [<start>, <end>, <step>, [lookback]] <promql>
+/// doesn't execute the query but tells how the query would be executed (similar to SQL EXPLAIN).
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlExplain {
pub start: String,
pub end: String,
pub step: String,
+ pub lookback: Option<String>,
pub query: String,
pub is_verbose: bool,
}
-/// TQL ANALYZE [VERBOSE] (like SQL ANALYZE): executes the plan and tells the detailed per-step execution time.
+/// TQL ANALYZE [VERBOSE] (<start>, <end>, <step>, [lookback]) <promql>
+/// executes the plan and tells the detailed per-step execution time (similar to SQL ANALYZE).
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlAnalyze {
pub start: String,
pub end: String,
pub step: String,
+ pub lookback: Option<String>,
pub query: String,
pub is_verbose: bool,
}
+
+/// Intermediate structure used to unify parameter mappings for various TQL operations.
+/// This struct serves as a common parameter container for parsing TQL queries
+/// and constructing corresponding TQL operations: `TqlEval`, `TqlAnalyze` or `TqlExplain`.
+#[derive(Debug)]
+pub struct TqlParameters {
+ start: String,
+ end: String,
+ step: String,
+ lookback: Option<String>,
+ query: String,
+ pub is_verbose: bool,
+}
+
+impl TqlParameters {
+ pub fn new(
+ start: String,
+ end: String,
+ step: String,
+ lookback: Option<String>,
+ query: String,
+ ) -> Self {
+ TqlParameters {
+ start,
+ end,
+ step,
+ lookback,
+ query,
+ is_verbose: false,
+ }
+ }
+}
+
+impl From<TqlParameters> for TqlEval {
+ fn from(params: TqlParameters) -> Self {
+ TqlEval {
+ start: params.start,
+ end: params.end,
+ step: params.step,
+ lookback: params.lookback,
+ query: params.query,
+ }
+ }
+}
+
+impl From<TqlParameters> for TqlExplain {
+ fn from(params: TqlParameters) -> Self {
+ TqlExplain {
+ start: params.start,
+ end: params.end,
+ step: params.step,
+ query: params.query,
+ lookback: params.lookback,
+ is_verbose: params.is_verbose,
+ }
+ }
+}
+
+impl From<TqlParameters> for TqlAnalyze {
+ fn from(params: TqlParameters) -> Self {
+ TqlAnalyze {
+ start: params.start,
+ end: params.end,
+ step: params.step,
+ query: params.query,
+ lookback: params.lookback,
+ is_verbose: params.is_verbose,
+ }
+ }
+}
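TqlParameters above is only a staging container: the parser fills it once and the From impls turn it into whichever statement was requested, carrying is_verbose along for EXPLAIN/ANALYZE. A short usage sketch built directly on the definitions above:

    let params = TqlParameters::new(
        "0".to_string(),   // start
        "30".to_string(),  // end
        "10s".to_string(), // step
        None,              // lookback omitted
        "up".to_string(),  // PromQL query
    );
    let eval: TqlEval = params.into(); // likewise TqlExplain / TqlAnalyze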
diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.result b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
index 9ac49f77f541..96b2548503df 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
@@ -27,6 +27,47 @@ TQL ANALYZE (0, 10, '5s') test;
|_|_|
+-+-+
+-- 'lookback' parameter is not fully supported; the test has to be updated
+-- analyze from 0s to 10s with a 1s step. No point at 0s.
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL ANALYZE (0, 10, '1s', '2s') test;
+
++-+-+
+| plan_type_| plan_|
++-+-+
+| Plan with Metrics | PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[1000], time index=[j], REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
+|_|_PromSeriesDivideExec: tags=["k"], REDACTED
+|_|_SortExec: expr=[k@2 ASC NULLS LAST], REDACTED
+|_|_MergeScanExec: REDACTED
+|_|_|
++-+-+
+
+-- analyze at 0s, 5s and 10s. No point at 0s.
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;
+
++-+-+
+| plan_type_| plan_|
++-+-+
+| Plan with Metrics | PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j], REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
+|_|_PromSeriesDivideExec: tags=["k"], REDACTED
+|_|_SortExec: expr=[k@2 ASC NULLS LAST], REDACTED
+|_|_MergeScanExec: REDACTED
+|_|_|
++-+-+
+
-- analyze verbose at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.sql b/tests/cases/standalone/common/tql-explain-analyze/analyze.sql
index 6fb8f3c0e555..e888ba8d51ad 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/analyze.sql
+++ b/tests/cases/standalone/common/tql-explain-analyze/analyze.sql
@@ -11,6 +11,23 @@ INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
-- SQLNESS REPLACE (peers.*) REDACTED
TQL ANALYZE (0, 10, '5s') test;
+-- 'lookback' parameter is not fully supported; the test has to be updated
+-- analyze from 0s to 10s with a 1s step. No point at 0s.
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL ANALYZE (0, 10, '1s', '2s') test;
+
+-- analyze at 0s, 5s and 10s. No point at 0s.
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;
+
-- analyze verbose at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
diff --git a/tests/cases/standalone/common/tql-explain-analyze/explain.result b/tests/cases/standalone/common/tql-explain-analyze/explain.result
index 3e1877654f2b..e50d1892f351 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/explain.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/explain.result
@@ -28,6 +28,49 @@ TQL EXPLAIN (0, 10, '5s') test;
| | |
+---------------+-----------------------------------------------------------------------------------------------+
+-- 'lookback' parameter is not fully supported; the test has to be updated
+-- explain from 0s to 10s with a 1s step. No point at 0s.
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL EXPLAIN (0, 10, '1s', '2s') test;
+
++---------------+-----------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------+
+| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
+| | PromSeriesNormalize: offset=[0], time index=[j], filter NaN: [false] |
+| | PromSeriesDivide: tags=["k"] |
+| | MergeScan [is_placeholder=false] |
+| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
+| | RepartitionExec: partitioning=REDACTED
+| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
+| | PromSeriesDivideExec: tags=["k"] |
+| | SortExec: expr=[k@2 ASC NULLS LAST] |
+| | MergeScanExec: REDACTED
+| | |
++---------------+-----------------------------------------------------------------------------------------------+
+
+-- explain at 0s, 5s and 10s. No point at 0s.
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL EXPLAIN ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;
+
++---------------+-----------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------+
+| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
+| | PromSeriesNormalize: offset=[0], time index=[j], filter NaN: [false] |
+| | PromSeriesDivide: tags=["k"] |
+| | MergeScan [is_placeholder=false] |
+| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
+| | RepartitionExec: partitioning=REDACTED
+| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
+| | PromSeriesDivideExec: tags=["k"] |
+| | SortExec: expr=[k@2 ASC NULLS LAST] |
+| | MergeScanExec: REDACTED
+| | |
++---------------+-----------------------------------------------------------------------------------------------+
+
-- explain verbose at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
diff --git a/tests/cases/standalone/common/tql-explain-analyze/explain.sql b/tests/cases/standalone/common/tql-explain-analyze/explain.sql
index 3b2c961933d3..cf5618496d65 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/explain.sql
+++ b/tests/cases/standalone/common/tql-explain-analyze/explain.sql
@@ -8,6 +8,17 @@ INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
-- SQLNESS REPLACE (peers.*) REDACTED
TQL EXPLAIN (0, 10, '5s') test;
+-- 'lookback' parameter is not fully supported; the test has to be updated
+-- explain from 0s to 10s with a 1s step. No point at 0s.
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL EXPLAIN (0, 10, '1s', '2s') test;
+
+-- explain at 0s, 5s and 10s. No point at 0s.
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (peers.*) REDACTED
+TQL EXPLAIN ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;
+
-- explain verbose at 0s, 5s and 10s. No point at 0s.
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
diff --git a/tests/cases/standalone/common/tql/basic.result b/tests/cases/standalone/common/tql/basic.result
index 8d0229dd3164..f679dcb341d1 100644
--- a/tests/cases/standalone/common/tql/basic.result
+++ b/tests/cases/standalone/common/tql/basic.result
@@ -59,6 +59,58 @@ TQL EVAL (0, 10, '5s') test{k="a"};
| 2.0 | 1970-01-01T00:00:10 | a |
+-----+---------------------+---+
+-- 'lookback' parameter is not fully supported; the test has to be updated
+TQL EVAL (0, 10, '1s', '2s') test{k="a"};
+
++-----+---------------------+---+
+| i | j | k |
++-----+---------------------+---+
+| 2.0 | 1970-01-01T00:00:01 | a |
+| 2.0 | 1970-01-01T00:00:02 | a |
+| 2.0 | 1970-01-01T00:00:03 | a |
+| 2.0 | 1970-01-01T00:00:04 | a |
+| 2.0 | 1970-01-01T00:00:05 | a |
+| 2.0 | 1970-01-01T00:00:06 | a |
+| 2.0 | 1970-01-01T00:00:07 | a |
+| 2.0 | 1970-01-01T00:00:08 | a |
+| 2.0 | 1970-01-01T00:00:09 | a |
+| 2.0 | 1970-01-01T00:00:10 | a |
++-----+---------------------+---+
+
+TQL EVAL ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '1s') test{k="a"};
+
++-----+---------------------+---+
+| i | j | k |
++-----+---------------------+---+
+| 2.0 | 1970-01-01T00:00:01 | a |
+| 2.0 | 1970-01-01T00:00:02 | a |
+| 2.0 | 1970-01-01T00:00:03 | a |
+| 2.0 | 1970-01-01T00:00:04 | a |
+| 2.0 | 1970-01-01T00:00:05 | a |
+| 2.0 | 1970-01-01T00:00:06 | a |
+| 2.0 | 1970-01-01T00:00:07 | a |
+| 2.0 | 1970-01-01T00:00:08 | a |
+| 2.0 | 1970-01-01T00:00:09 | a |
+| 2.0 | 1970-01-01T00:00:10 | a |
++-----+---------------------+---+
+
+TQL EVAL (now() - now(), now() - (now() - '10 seconds'::interval), '1s') test{k="a"};
+
++-----+---------------------+---+
+| i | j | k |
++-----+---------------------+---+
+| 2.0 | 1970-01-01T00:00:01 | a |
+| 2.0 | 1970-01-01T00:00:02 | a |
+| 2.0 | 1970-01-01T00:00:03 | a |
+| 2.0 | 1970-01-01T00:00:04 | a |
+| 2.0 | 1970-01-01T00:00:05 | a |
+| 2.0 | 1970-01-01T00:00:06 | a |
+| 2.0 | 1970-01-01T00:00:07 | a |
+| 2.0 | 1970-01-01T00:00:08 | a |
+| 2.0 | 1970-01-01T00:00:09 | a |
+| 2.0 | 1970-01-01T00:00:10 | a |
++-----+---------------------+---+
+
DROP TABLE test;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/tql/basic.sql b/tests/cases/standalone/common/tql/basic.sql
index bd21518665f7..9d9f3c8863b4 100644
--- a/tests/cases/standalone/common/tql/basic.sql
+++ b/tests/cases/standalone/common/tql/basic.sql
@@ -19,4 +19,11 @@ TQL EVAL (0, 10, '5s') {__name__!="test"};
-- the point at 1ms will be shadowed by the point at 2ms
TQL EVAL (0, 10, '5s') test{k="a"};
+-- 'lookback' parameter is not fully supported; the test has to be updated
+TQL EVAL (0, 10, '1s', '2s') test{k="a"};
+
+TQL EVAL ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '1s') test{k="a"};
+
+TQL EVAL (now() - now(), now() - (now() - '10 seconds'::interval), '1s') test{k="a"};
+
DROP TABLE test;
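A detail worth keeping in mind when reading the plans above: lookback=[300000] is reported in milliseconds, which matches the Prometheus-style default lookback delta, so the plans still print 300000 even when '2s' is passed as the not-yet-supported fourth parameter. The arithmetic, spelled out:

    const DEFAULT_LOOKBACK_MS: u64 = 300_000;
    const DEFAULT_LOOKBACK_SECS: u64 = DEFAULT_LOOKBACK_MS / 1000; // 300 s, i.e. 5 minutes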
|
feat
|
add initial support for start,stop,step as sql functions (#3507)
|
50d2685365093aa47efb5137129ce10aeca7887e
|
2023-02-27 20:21:49
|
shuiyisong
|
fix: fix catalog parsing issue (#1091)
| false
|
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 200341ec473a..47b065262790 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -21,7 +21,7 @@ use arc_swap::ArcSwap;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
-use common_telemetry::{debug, info};
+use common_telemetry::{debug, error, info};
use futures::Stream;
use futures_util::StreamExt;
use snafu::{OptionExt, ResultExt};
@@ -109,9 +109,14 @@ impl RemoteCatalogManager {
debug!("Ignoring non-catalog key: {}", String::from_utf8_lossy(&k));
continue;
}
- let key = CatalogKey::parse(&String::from_utf8_lossy(&k))
- .context(InvalidCatalogValueSnafu)?;
- yield Ok(key)
+
+ let catalog_key = String::from_utf8_lossy(&k);
+ if let Ok(key) = CatalogKey::parse(&catalog_key) {
+ yield Ok(key)
+ } else {
+ error!("Invalid catalog key: {:?}", catalog_key);
+ continue;
+ }
}
}))
}
|
fix
|
fix catalog parsing issue (#1091)
|
ab9b1a91d42971e5e6db90216daa4e81a252587b
|
2022-11-14 18:48:14
|
Ruihang Xia
|
chore: turn-off codecov's patch comment (#498)
| false
|
diff --git a/codecov.yml b/codecov.yml
index 464654c8f54f..cdd5d34113e0 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -4,5 +4,6 @@ coverage:
project:
default:
threshold: 1%
+ patch: off
ignore:
- "**/error*.rs" # ignore all error.rs files
|
chore
|
turn-off codecov's patch comment (#498)
|
2665616f72bde392b4ab7f9ad66ae777d5f4b4c9
|
2024-07-01 22:35:15
|
tison
|
build(deps): Upgrade OpenDAL to 0.47 (#4224)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 717106c55f2a..d123ebfe8a8c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7051,9 +7051,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.46.0"
+version = "0.47.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "328c4992328e8965e6a6ef102d38438b5fdc7d9b9107eda2377ba05379d9d544"
+checksum = "876c6655dd5b410c83e0c9edf38be60fed540a1cc1c2f3a2ac31830eb8a8ff45"
dependencies = [
"anyhow",
"async-trait",
diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs
index c555f763b59b..999830354226 100644
--- a/src/common/datasource/src/file_format.rs
+++ b/src/common/datasource/src/file_format.rs
@@ -149,7 +149,9 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
.reader(&path)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?
- .into_bytes_stream(..);
+ .into_bytes_stream(..)
+ .await
+ .map_err(|e| DataFusionError::External(Box::new(e)))?;
let mut upstream = compression_type.convert_stream(reader).fuse();
diff --git a/src/common/datasource/src/file_format/csv.rs b/src/common/datasource/src/file_format/csv.rs
index ade4e5409e42..1172004a9e19 100644
--- a/src/common/datasource/src/file_format/csv.rs
+++ b/src/common/datasource/src/file_format/csv.rs
@@ -169,11 +169,14 @@ impl FileFormat for CsvFormat {
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
+
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
+ .await
+ .context(error::ReadObjectSnafu { path })?
.compat();
let decoded = self.compression_type.convert_async_read(reader);
diff --git a/src/common/datasource/src/file_format/json.rs b/src/common/datasource/src/file_format/json.rs
index 97057f836200..3599fcd4ec9d 100644
--- a/src/common/datasource/src/file_format/json.rs
+++ b/src/common/datasource/src/file_format/json.rs
@@ -87,11 +87,14 @@ impl FileFormat for JsonFormat {
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
+
let reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
+ .await
+ .context(error::ReadObjectSnafu { path })?
.compat();
let decoded = self.compression_type.convert_async_read(reader);
diff --git a/src/common/datasource/src/file_format/parquet.rs b/src/common/datasource/src/file_format/parquet.rs
index 7994aafc4260..9988a311f51c 100644
--- a/src/common/datasource/src/file_format/parquet.rs
+++ b/src/common/datasource/src/file_format/parquet.rs
@@ -52,11 +52,14 @@ impl FileFormat for ParquetFormat {
.stat(path)
.await
.context(error::ReadObjectSnafu { path })?;
+
let mut reader = store
.reader(path)
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
+ .await
+ .context(error::ReadObjectSnafu { path })?
.compat();
let metadata = reader
@@ -129,6 +132,7 @@ impl LazyParquetFileReader {
.reader(&self.path)
.await?
.into_futures_async_read(0..meta.content_length())
+ .await?
.compat();
self.reader = Some(reader);
}
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index 8b64511598a8..6e6e5bea6813 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -20,6 +20,7 @@ mod gcs;
mod oss;
mod s3;
+use std::sync::Arc;
use std::time::Duration;
use std::{env, path};
@@ -28,7 +29,7 @@ use common_telemetry::info;
use object_store::layers::{LruCacheLayer, RetryLayer};
use object_store::services::Fs;
use object_store::util::{join_dir, normalize_dir, with_instrument_layers};
-use object_store::{HttpClient, ObjectStore};
+use object_store::{HttpClient, ObjectStore, ObjectStoreBuilder};
use snafu::prelude::*;
use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
@@ -106,13 +107,14 @@ async fn create_object_store_with_cache(
if let Some(path) = cache_path {
let atomic_temp_dir = join_dir(path, ".tmp/");
clean_temp_dir(&atomic_temp_dir)?;
- let mut builder = Fs::default();
- builder.root(path).atomic_write_dir(&atomic_temp_dir);
- let cache_store = ObjectStore::new(builder)
- .context(error::InitBackendSnafu)?
- .finish();
- let cache_layer = LruCacheLayer::new(cache_store, cache_capacity.0 as usize)
+ let cache_store = {
+ let mut builder = Fs::default();
+ builder.root(path).atomic_write_dir(&atomic_temp_dir);
+ builder.build().context(error::InitBackendSnafu)?
+ };
+
+ let cache_layer = LruCacheLayer::new(Arc::new(cache_store), cache_capacity.0 as usize)
.await
.context(error::InitBackendSnafu)?;
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs
index 26eea8f2d82a..f2731e25d0b2 100644
--- a/src/mito2/src/cache/write_cache.rs
+++ b/src/mito2/src/cache/write_cache.rs
@@ -188,7 +188,9 @@ impl WriteCache {
.reader(&cache_path)
.await
.context(error::OpenDalSnafu)?
- .into_futures_async_read(0..cached_value.content_length());
+ .into_futures_async_read(0..cached_value.content_length())
+ .await
+ .context(error::OpenDalSnafu)?;
let mut writer = remote_store
.writer_with(upload_path)
diff --git a/src/mito2/src/sst/index/applier.rs b/src/mito2/src/sst/index/applier.rs
index aba4534b2847..da06361568f5 100644
--- a/src/mito2/src/sst/index/applier.rs
+++ b/src/mito2/src/sst/index/applier.rs
@@ -28,8 +28,8 @@ use store_api::storage::RegionId;
use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey};
use crate::error::{
- ApplyIndexSnafu, PuffinBlobTypeNotFoundSnafu, PuffinReadBlobSnafu, PuffinReadMetadataSnafu,
- Result,
+ ApplyIndexSnafu, OpenDalSnafu, PuffinBlobTypeNotFoundSnafu, PuffinReadBlobSnafu,
+ PuffinReadMetadataSnafu, Result,
};
use crate::metrics::{
INDEX_APPLY_ELAPSED, INDEX_APPLY_MEMORY_USAGE, INDEX_PUFFIN_READ_BYTES_TOTAL,
@@ -128,11 +128,19 @@ impl SstIndexApplier {
return Ok(None);
};
- Ok(file_cache
+ let Some(reader) = file_cache
.reader(IndexKey::new(self.region_id, file_id, FileType::Puffin))
.await
- .map(|v| v.into_futures_async_read(0..indexed_value.file_size as u64))
- .map(PuffinFileReader::new))
+ else {
+ return Ok(None);
+ };
+
+ let reader = reader
+ .into_futures_async_read(0..indexed_value.file_size as u64)
+ .await
+ .context(OpenDalSnafu)?;
+
+ Ok(Some(PuffinFileReader::new(reader)))
}
/// Helper function to create a [`PuffinFileReader`] from the remote index file.
diff --git a/src/mito2/src/sst/index/store.rs b/src/mito2/src/sst/index/store.rs
index 9d26118366ad..7dfcdc253cd6 100644
--- a/src/mito2/src/sst/index/store.rs
+++ b/src/mito2/src/sst/index/store.rs
@@ -67,7 +67,9 @@ impl InstrumentedStore {
.reader(path)
.await
.context(OpenDalSnafu)?
- .into_futures_async_read(0..meta.content_length());
+ .into_futures_async_read(0..meta.content_length())
+ .await
+ .context(OpenDalSnafu)?;
Ok(InstrumentedAsyncRead::new(
reader,
read_byte_count,
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index 5f4c4c98ed8d..00bb5a93acfd 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -17,7 +17,7 @@ futures.workspace = true
lazy_static.workspace = true
md5 = "0.7"
moka = { workspace = true, features = ["future"] }
-opendal = { version = "0.46", features = [
+opendal = { version = "0.47", features = [
"layers-tracing",
"services-azblob",
"services-fs",
diff --git a/src/object-store/src/layers/lru_cache.rs b/src/object-store/src/layers/lru_cache.rs
index bcea36603ca6..ded6afe58bb6 100644
--- a/src/object-store/src/layers/lru_cache.rs
+++ b/src/object-store/src/layers/lru_cache.rs
@@ -14,26 +14,26 @@
use std::sync::Arc;
-use opendal::raw::oio::ReadDyn;
+use opendal::raw::oio::Reader;
use opendal::raw::{
Access, Layer, LayeredAccess, OpDelete, OpList, OpRead, OpWrite, RpDelete, RpList, RpRead,
RpWrite,
};
-use opendal::{Operator, Result};
+use opendal::Result;
mod read_cache;
use common_telemetry::info;
use read_cache::ReadCache;
/// An opendal layer with local LRU file cache supporting.
#[derive(Clone)]
-pub struct LruCacheLayer {
+pub struct LruCacheLayer<C: Access> {
// The read cache
- read_cache: ReadCache,
+ read_cache: ReadCache<C>,
}
-impl LruCacheLayer {
+impl<C: Access> LruCacheLayer<C> {
/// Create a `[LruCacheLayer]` with local file cache and capacity in bytes.
- pub async fn new(file_cache: Operator, capacity: usize) -> Result<Self> {
+ pub async fn new(file_cache: Arc<C>, capacity: usize) -> Result<Self> {
let read_cache = ReadCache::new(file_cache, capacity);
let (entries, bytes) = read_cache.recover_cache().await?;
@@ -52,12 +52,12 @@ impl LruCacheLayer {
/// Returns the read cache statistics info `(EntryCount, SizeInBytes)`.
pub async fn read_cache_stat(&self) -> (u64, u64) {
- self.read_cache.stat().await
+ self.read_cache.cache_stat().await
}
}
-impl<I: Access> Layer<I> for LruCacheLayer {
- type LayeredAccess = LruCacheAccess<I>;
+impl<I: Access, C: Access> Layer<I> for LruCacheLayer<C> {
+ type LayeredAccess = LruCacheAccess<I, C>;
fn layer(&self, inner: I) -> Self::LayeredAccess {
LruCacheAccess {
@@ -68,14 +68,14 @@ impl<I: Access> Layer<I> for LruCacheLayer {
}
#[derive(Debug)]
-pub struct LruCacheAccess<I> {
+pub struct LruCacheAccess<I, C> {
inner: I,
- read_cache: ReadCache,
+ read_cache: ReadCache<C>,
}
-impl<I: Access> LayeredAccess for LruCacheAccess<I> {
+impl<I: Access, C: Access> LayeredAccess for LruCacheAccess<I, C> {
type Inner = I;
- type Reader = Arc<dyn ReadDyn>;
+ type Reader = Reader;
type BlockingReader = I::BlockingReader;
type Writer = I::Writer;
type BlockingWriter = I::BlockingWriter;
@@ -87,7 +87,9 @@ impl<I: Access> LayeredAccess for LruCacheAccess<I> {
}
async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
- self.read_cache.read(&self.inner, path, args).await
+ self.read_cache
+ .read_from_cache(&self.inner, path, args)
+ .await
}
async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
diff --git a/src/object-store/src/layers/lru_cache/read_cache.rs b/src/object-store/src/layers/lru_cache/read_cache.rs
index 81415b8039ca..6519adf766f9 100644
--- a/src/object-store/src/layers/lru_cache/read_cache.rs
+++ b/src/object-store/src/layers/lru_cache/read_cache.rs
@@ -15,12 +15,12 @@
use std::sync::Arc;
use common_telemetry::debug;
-use futures::{FutureExt, StreamExt};
+use futures::FutureExt;
use moka::future::Cache;
use moka::notification::ListenerFuture;
-use opendal::raw::oio::{Read, ReadDyn, Reader};
-use opendal::raw::{Access, BytesRange, OpRead, OpStat, RpRead};
-use opendal::{Buffer, Error as OpendalError, ErrorKind, Operator, Result};
+use opendal::raw::oio::{List, Read, Reader, Write};
+use opendal::raw::{Access, OpDelete, OpList, OpRead, OpStat, OpWrite, RpRead};
+use opendal::{Error as OpendalError, ErrorKind, Result};
use crate::metrics::{
OBJECT_STORE_LRU_CACHE_BYTES, OBJECT_STORE_LRU_CACHE_ENTRIES, OBJECT_STORE_LRU_CACHE_HIT,
@@ -51,23 +51,36 @@ fn can_cache(path: &str) -> bool {
!path.ends_with("_last_checkpoint")
}
-/// Generate an unique cache key for the read path and range.
-fn read_cache_key(path: &str, range: BytesRange) -> String {
- format!("{:x}.cache-{}", md5::compute(path), range.to_header())
+/// Generate a unique cache key for the read path and range.
+fn read_cache_key(path: &str, args: &OpRead) -> String {
+ format!(
+ "{:x}.cache-{}",
+ md5::compute(path),
+ args.range().to_header()
+ )
}
/// Local read cache for files in object storage
-#[derive(Clone, Debug)]
-pub(crate) struct ReadCache {
+#[derive(Debug)]
+pub(crate) struct ReadCache<C> {
/// Local file cache backend
- file_cache: Operator,
+ file_cache: Arc<C>,
/// Local memory cache to track local cache files
mem_cache: Cache<String, ReadResult>,
}
-impl ReadCache {
+impl<C> Clone for ReadCache<C> {
+ fn clone(&self) -> Self {
+ Self {
+ file_cache: self.file_cache.clone(),
+ mem_cache: self.mem_cache.clone(),
+ }
+ }
+}
+
+impl<C: Access> ReadCache<C> {
/// Create a [`ReadCache`] with capacity in bytes.
- pub(crate) fn new(file_cache: Operator, capacity: usize) -> Self {
+ pub(crate) fn new(file_cache: Arc<C>, capacity: usize) -> Self {
let file_cache_cloned = file_cache.clone();
let eviction_listener =
move |read_key: Arc<String>, read_result: ReadResult, cause| -> ListenerFuture {
@@ -79,7 +92,7 @@ impl ReadCache {
if let ReadResult::Success(size) = read_result {
OBJECT_STORE_LRU_CACHE_BYTES.sub(size as i64);
- let result = file_cache_cloned.delete(&read_key).await;
+ let result = file_cache_cloned.delete(&read_key, OpDelete::new()).await;
debug!(
"Deleted local cache file `{}`, result: {:?}, cause: {:?}.",
read_key, result, cause
@@ -104,7 +117,7 @@ impl ReadCache {
}
/// Returns the cache's entry count and total approximate entry size in bytes.
- pub(crate) async fn stat(&self) -> (u64, u64) {
+ pub(crate) async fn cache_stat(&self) -> (u64, u64) {
self.mem_cache.run_pending_tasks().await;
(self.mem_cache.entry_count(), self.mem_cache.weighted_size())
@@ -129,17 +142,17 @@ impl ReadCache {
/// Recover existing cache items from `file_cache` to `mem_cache`.
/// Return entry count and total approximate entry size in bytes.
pub(crate) async fn recover_cache(&self) -> Result<(u64, u64)> {
- let mut pager = self.file_cache.lister("/").await?;
+ let (_, mut pager) = self.file_cache.list("/", OpList::default()).await?;
- while let Some(entry) = pager.next().await.transpose()? {
+ while let Some(entry) = pager.next().await? {
let read_key = entry.path();
// We can't retrieve the metadata from `[opendal::raw::oio::Entry]` directly,
// because it's private field.
let size = {
- let stat = self.file_cache.stat(read_key).await?;
+ let stat = self.file_cache.stat(read_key, OpStat::default()).await?;
- stat.content_length()
+ stat.into_metadata().content_length()
};
OBJECT_STORE_LRU_CACHE_ENTRIES.inc();
@@ -149,26 +162,27 @@ impl ReadCache {
.await;
}
- Ok(self.stat().await)
+ Ok(self.cache_stat().await)
}
/// Returns true when the read cache contains the specific file.
pub(crate) async fn contains_file(&self, path: &str) -> bool {
self.mem_cache.run_pending_tasks().await;
- self.mem_cache.contains_key(path) && self.file_cache.stat(path).await.is_ok()
+ self.mem_cache.contains_key(path)
+ && self.file_cache.stat(path, OpStat::default()).await.is_ok()
}
/// Read from a specific path using the OpRead operation.
/// It will attempt to retrieve the data from the local cache.
/// If the data is not found in the local cache,
- /// it will fallback to retrieving it from remote object storage
+ /// it will fall back to retrieving it from remote object storage
/// and cache the result locally.
- pub(crate) async fn read<I>(
+ pub(crate) async fn read_from_cache<I>(
&self,
inner: &I,
path: &str,
args: OpRead,
- ) -> Result<(RpRead, Arc<dyn ReadDyn>)>
+ ) -> Result<(RpRead, Reader)>
where
I: Access,
{
@@ -176,46 +190,82 @@ impl ReadCache {
return inner.read(path, args).await.map(to_output_reader);
}
- // FIXME: remove this block after opendal v0.47 released.
- let meta = inner.stat(path, OpStat::new()).await?;
- let (rp, reader) = inner.read(path, args).await?;
- let reader: ReadCacheReader<I> = ReadCacheReader {
- path: Arc::new(path.to_string()),
- inner_reader: reader,
- size: meta.into_metadata().content_length(),
- file_cache: self.file_cache.clone(),
- mem_cache: self.mem_cache.clone(),
- };
- Ok((rp, Arc::new(reader)))
+ let read_key = read_cache_key(path, &args);
+
+ let read_result = self
+ .mem_cache
+ .try_get_with(
+ read_key.clone(),
+ self.read_remote(inner, &read_key, path, args.clone()),
+ )
+ .await
+ .map_err(|e| OpendalError::new(e.kind(), e.to_string()))?;
+
+ match read_result {
+ ReadResult::Success(_) => {
+                    // There is a concurrency issue here: the local cache may be purged
+                    // while reading, so we have to fall back to the remote read
+ match self.file_cache.read(&read_key, OpRead::default()).await {
+ Ok(ret) => {
+ OBJECT_STORE_LRU_CACHE_HIT
+ .with_label_values(&["success"])
+ .inc();
+ Ok(to_output_reader(ret))
+ }
+ Err(_) => {
+ OBJECT_STORE_LRU_CACHE_MISS.inc();
+ inner.read(path, args).await.map(to_output_reader)
+ }
+ }
+ }
+ ReadResult::NotFound => {
+ OBJECT_STORE_LRU_CACHE_HIT
+ .with_label_values(&["not_found"])
+ .inc();
+
+ Err(OpendalError::new(
+ ErrorKind::NotFound,
+ format!("File not found: {path}"),
+ ))
+ }
+ }
}
-}
-pub struct ReadCacheReader<I: Access> {
- /// Path of the file
- path: Arc<String>,
- /// Remote file reader.
- inner_reader: I::Reader,
- /// FIXME: remove this field after opendal v0.47 released.
- ///
- /// OpenDAL's read_at takes `offset, limit` which means the underlying storage
- /// services could return less data than limit. We store size here as a workaround.
- ///
- /// This API has been refactor into `offset, size` instead. After opendal v0.47 released,
- /// we don't need this anymore.
- size: u64,
- /// Local file cache backend
- file_cache: Operator,
- /// Local memory cache to track local cache files
- mem_cache: Cache<String, ReadResult>,
-}
+ async fn try_write_cache<I>(&self, mut reader: I::Reader, read_key: &str) -> Result<usize>
+ where
+ I: Access,
+ {
+ let (_, mut writer) = self.file_cache.write(read_key, OpWrite::new()).await?;
+ let mut total = 0;
+ loop {
+ let bytes = reader.read().await?;
+ if bytes.is_empty() {
+ break;
+ }
+
+ total += bytes.len();
+ writer.write(bytes).await?;
+ }
+ // Call `close` to ensure data is written.
+ writer.close().await?;
+ Ok(total)
+ }
-impl<I: Access> ReadCacheReader<I> {
- /// TODO: we can return the Buffer directly to avoid another read from cache.
- async fn read_remote(&self, offset: u64, limit: usize) -> Result<ReadResult> {
+ /// Read the file from remote storage. If success, write the content into local cache.
+ async fn read_remote<I>(
+ &self,
+ inner: &I,
+ read_key: &str,
+ path: &str,
+ args: OpRead,
+ ) -> Result<ReadResult>
+ where
+ I: Access,
+ {
OBJECT_STORE_LRU_CACHE_MISS.inc();
- let buf = self.inner_reader.read_at(offset, limit).await?;
- let result = self.try_write_cache(buf, offset).await;
+ let (_, reader) = inner.read(path, args).await?;
+ let result = self.try_write_cache::<I>(reader, read_key).await;
match result {
Ok(read_bytes) => {
@@ -242,59 +292,10 @@ impl<I: Access> ReadCacheReader<I> {
}
}
}
-
- async fn try_write_cache(&self, buf: Buffer, offset: u64) -> Result<usize> {
- let size = buf.len();
- let read_key = read_cache_key(&self.path, BytesRange::new(offset, Some(size as _)));
- self.file_cache.write(&read_key, buf).await?;
- Ok(size)
- }
-}
-
-impl<I: Access> Read for ReadCacheReader<I> {
- async fn read_at(&self, offset: u64, limit: usize) -> Result<Buffer> {
- let size = self.size.min(offset + limit as u64) - offset;
- let read_key = read_cache_key(&self.path, BytesRange::new(offset, Some(size as _)));
-
- let read_result = self
- .mem_cache
- .try_get_with(read_key.clone(), self.read_remote(offset, limit))
- .await
- .map_err(|e| OpendalError::new(e.kind(), &e.to_string()))?;
-
- match read_result {
- ReadResult::Success(_) => {
- // There is a concurrent issue here, the local cache may be purged
- // while reading, we have to fallback to remote read
- match self.file_cache.read(&read_key).await {
- Ok(ret) => {
- OBJECT_STORE_LRU_CACHE_HIT
- .with_label_values(&["success"])
- .inc();
- Ok(ret)
- }
- Err(_) => {
- OBJECT_STORE_LRU_CACHE_MISS.inc();
- self.inner_reader.read_at(offset, limit).await
- }
- }
- }
- ReadResult::NotFound => {
- OBJECT_STORE_LRU_CACHE_HIT
- .with_label_values(&["not_found"])
- .inc();
-
- Err(OpendalError::new(
- ErrorKind::NotFound,
- &format!("File not found: {}", self.path),
- ))
- }
- }
- }
}
fn to_output_reader<R: Read + 'static>(input: (RpRead, R)) -> (RpRead, Reader) {
- (input.0, Arc::new(input.1))
+ (input.0, Box::new(input.1))
}
#[cfg(test)]
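The rewritten read path above leans on moka's try_get_with to keep cache population race-free: concurrent readers of one key await the same initialization future, so the remote object is fetched at most once per key and any failure comes back Arc-wrapped. A minimal sketch of that behaviour, assuming only the moka crate (none of the types from this diff); call it from an async context:

    use moka::future::Cache;

    async fn dedup_demo() {
        let cache: Cache<String, u64> = Cache::new(1_000);
        // All concurrent callers with the same key share this init future.
        let value: u64 = cache
            .try_get_with("read-key".to_string(), async {
                Ok::<_, std::io::Error>(42)
            })
            .await
            .expect("init succeeded"); // error type is Arc<std::io::Error>
        assert_eq!(value, 42);
    }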
diff --git a/src/object-store/src/layers/prometheus.rs b/src/object-store/src/layers/prometheus.rs
index a609ce7203f5..6cc71a3bebcf 100644
--- a/src/object-store/src/layers/prometheus.rs
+++ b/src/object-store/src/layers/prometheus.rs
@@ -63,7 +63,7 @@ fn increment_errors_total(op: Operation, kind: ErrorKind) {
);
}
-/// Please refer to [prometheus](https://docs.rs/prometheus) for every operations.
+/// Please refer to [prometheus](https://docs.rs/prometheus) for every operation.
///
/// # Prometheus Metrics
///
@@ -441,8 +441,8 @@ impl<R> PrometheusMetricWrapper<R> {
}
impl<R: oio::Read> oio::Read for PrometheusMetricWrapper<R> {
- async fn read_at(&self, offset: u64, limit: usize) -> Result<Buffer> {
- self.inner.read_at(offset, limit).await.map_err(|err| {
+ async fn read(&mut self) -> Result<Buffer> {
+ self.inner.read().await.map_err(|err| {
increment_errors_total(self.op, err.kind());
err
})
@@ -450,8 +450,8 @@ impl<R: oio::Read> oio::Read for PrometheusMetricWrapper<R> {
}
impl<R: oio::BlockingRead> oio::BlockingRead for PrometheusMetricWrapper<R> {
- fn read_at(&self, offset: u64, limit: usize) -> opendal::Result<Buffer> {
- self.inner.read_at(offset, limit).map_err(|err| {
+ fn read(&mut self) -> opendal::Result<Buffer> {
+ self.inner.read().map_err(|err| {
increment_errors_total(self.op, err.kind());
err
})
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index a3d3800054c7..b5cedf6e651a 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -22,8 +22,10 @@ use object_store::layers::LruCacheLayer;
use object_store::services::{Fs, S3};
use object_store::test_util::TempFolder;
use object_store::{ObjectStore, ObjectStoreBuilder};
+use opendal::raw::oio::{List, Read};
+use opendal::raw::{Access, OpList, OpRead};
use opendal::services::{Azblob, Gcs, Oss};
-use opendal::{EntryMode, Operator, OperatorBuilder};
+use opendal::{EntryMode, OperatorBuilder};
async fn test_object_crud(store: &ObjectStore) -> Result<()> {
// Create object handler.
@@ -227,7 +229,7 @@ async fn test_file_backend_with_lru_cache() -> Result<()> {
.root(&data_dir.path().to_string_lossy())
.atomic_write_dir(&tmp_dir.path().to_string_lossy());
- let store = ObjectStore::new(builder).unwrap().finish();
+ let store = builder.build().unwrap();
let cache_dir = create_temp_dir("test_file_backend_with_lru_cache");
let cache_layer = {
@@ -235,12 +237,14 @@ async fn test_file_backend_with_lru_cache() -> Result<()> {
let _ = builder
.root(&cache_dir.path().to_string_lossy())
.atomic_write_dir(&cache_dir.path().to_string_lossy());
- let file_cache = Operator::new(builder).unwrap().finish();
+ let file_cache = Arc::new(builder.build().unwrap());
LruCacheLayer::new(file_cache, 32).await.unwrap()
};
- let store = store.layer(cache_layer.clone());
+ let store = OperatorBuilder::new(store)
+ .layer(cache_layer.clone())
+ .finish();
test_object_crud(&store).await?;
test_object_list(&store).await?;
@@ -250,31 +254,36 @@ async fn test_file_backend_with_lru_cache() -> Result<()> {
Ok(())
}
-async fn assert_lru_cache(cache_layer: &LruCacheLayer, file_names: &[&str]) {
+async fn assert_lru_cache<C: Access>(cache_layer: &LruCacheLayer<C>, file_names: &[&str]) {
for file_name in file_names {
assert!(cache_layer.contains_file(file_name).await);
}
}
-async fn assert_cache_files(
- store: &Operator,
+async fn assert_cache_files<C: Access>(
+ store: &C,
file_names: &[&str],
file_contents: &[&str],
) -> Result<()> {
- let objects = store.list("/").await?;
+ let (_, mut lister) = store.list("/", OpList::default()).await?;
+ let mut objects = vec![];
+ while let Some(e) = lister.next().await? {
+ objects.push(e);
+ }
// compare the cache file with the expected cache file; ignore orders
for o in objects {
- let position = file_names.iter().position(|&x| x == o.name());
- assert!(position.is_some(), "file not found: {}", o.name());
+ let position = file_names.iter().position(|&x| x == o.path());
+ assert!(position.is_some(), "file not found: {}", o.path());
let position = position.unwrap();
- let bs = store.read(o.path()).await.unwrap();
+ let (_, mut r) = store.read(o.path(), OpRead::default()).await.unwrap();
+ let bs = r.read_all().await.unwrap();
assert_eq!(
file_contents[position],
String::from_utf8(bs.to_vec())?,
"file content not match: {}",
- o.name()
+ o.path()
);
}
@@ -303,7 +312,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
.root(&cache_dir.path().to_string_lossy())
.atomic_write_dir(&atomic_temp_dir.path().to_string_lossy());
let file_cache = Arc::new(builder.build().unwrap());
- let cache_store = OperatorBuilder::new(file_cache.clone()).finish();
+ let cache_store = file_cache.clone();
// create operator for cache dir to verify cache file
let cache_layer = LruCacheLayer::new(cache_store.clone(), 38).await.unwrap();
diff --git a/src/operator/src/statement/copy_table_from.rs b/src/operator/src/statement/copy_table_from.rs
index 94892f10a8c9..a0818d6ea35e 100644
--- a/src/operator/src/statement/copy_table_from.rs
+++ b/src/operator/src/statement/copy_table_from.rs
@@ -156,6 +156,8 @@ impl StatementExecutor {
.await
.context(error::ReadObjectSnafu { path: &path })?
.into_futures_async_read(0..meta.content_length())
+ .await
+ .context(error::ReadObjectSnafu { path: &path })?
.compat();
let metadata = ArrowReaderMetadata::load_async(&mut reader, Default::default())
.await
@@ -301,6 +303,8 @@ impl StatementExecutor {
.await
.context(error::ReadObjectSnafu { path })?
.into_futures_async_read(0..meta.content_length())
+ .await
+ .context(error::ReadObjectSnafu { path })?
.compat();
let builder =
ParquetRecordBatchStreamBuilder::new_with_metadata(reader, metadata.clone());
|
build
|
Upgrade OpenDAL to 0.47 (#4224)
|
0f116c85012794e47d5956dc50312313f664ecb2
|
2024-12-02 10:48:25
|
Weny Xu
|
feat: enable compression for metasrv client (#5078)
| false
|
diff --git a/src/meta-client/src/client/cluster.rs b/src/meta-client/src/client/cluster.rs
index b1c7ff1089a1..72b2307790fa 100644
--- a/src/meta-client/src/client/cluster.rs
+++ b/src/meta-client/src/client/cluster.rs
@@ -31,6 +31,7 @@ use common_meta::rpc::store::{
use common_telemetry::{info, warn};
use snafu::{ensure, ResultExt};
use tokio::sync::RwLock;
+use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use tonic::Status;
@@ -173,7 +174,10 @@ impl Inner {
fn make_client(&self, addr: impl AsRef<str>) -> Result<ClusterClient<Channel>> {
let channel = self.channel_manager.get(addr).context(CreateChannelSnafu)?;
- Ok(ClusterClient::new(channel))
+ Ok(ClusterClient::new(channel)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Zstd))
}
#[inline]
diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs
index 47984360b44b..b1214d72df6d 100644
--- a/src/meta-client/src/client/heartbeat.rs
+++ b/src/meta-client/src/client/heartbeat.rs
@@ -23,6 +23,7 @@ use common_telemetry::tracing_context::TracingContext;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::{mpsc, RwLock};
use tokio_stream::wrappers::ReceiverStream;
+use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use tonic::Streaming;
@@ -249,7 +250,10 @@ impl Inner {
.get(addr)
.context(error::CreateChannelSnafu)?;
- Ok(HeartbeatClient::new(channel))
+ Ok(HeartbeatClient::new(channel)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd))
}
#[inline]
diff --git a/src/meta-client/src/client/procedure.rs b/src/meta-client/src/client/procedure.rs
index eccfe90dff93..2f310ab85d30 100644
--- a/src/meta-client/src/client/procedure.rs
+++ b/src/meta-client/src/client/procedure.rs
@@ -27,6 +27,7 @@ use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{info, warn};
use snafu::{ensure, ResultExt};
use tokio::sync::RwLock;
+use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use tonic::Status;
@@ -141,7 +142,10 @@ impl Inner {
.get(addr)
.context(error::CreateChannelSnafu)?;
- Ok(ProcedureServiceClient::new(channel))
+ Ok(ProcedureServiceClient::new(channel)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Zstd))
}
#[inline]
diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs
index e63f7ade1f5a..4f0fea7e0f23 100644
--- a/src/meta-client/src/client/store.rs
+++ b/src/meta-client/src/client/store.rs
@@ -25,6 +25,7 @@ use common_grpc::channel_manager::ChannelManager;
use common_telemetry::tracing_context::TracingContext;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::RwLock;
+use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use crate::client::{load_balance as lb, Id};
@@ -236,7 +237,10 @@ impl Inner {
.get(addr)
.context(error::CreateChannelSnafu)?;
- Ok(StoreClient::new(channel))
+ Ok(StoreClient::new(channel)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Zstd))
}
#[inline]
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index d5f369a3488d..47afa0ab416b 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -41,6 +41,7 @@ use tokio::net::TcpListener;
use tokio::sync::mpsc::{self, Receiver, Sender};
#[cfg(feature = "pg_kvbackend")]
use tokio_postgres::NoTls;
+use tonic::codec::CompressionEncoding;
use tonic::transport::server::{Router, TcpIncoming};
use crate::election::etcd::EtcdElection;
@@ -178,14 +179,26 @@ pub async fn bootstrap_metasrv_with_router(
Ok(())
}
+#[macro_export]
+macro_rules! add_compressed_service {
+ ($builder:expr, $server:expr) => {
+ $builder.add_service(
+ $server
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd),
+ )
+ };
+}
+
pub fn router(metasrv: Arc<Metasrv>) -> Router {
- tonic::transport::Server::builder()
- .accept_http1(true) // for admin services
- .add_service(HeartbeatServer::from_arc(metasrv.clone()))
- .add_service(StoreServer::from_arc(metasrv.clone()))
- .add_service(ClusterServer::from_arc(metasrv.clone()))
- .add_service(ProcedureServiceServer::from_arc(metasrv.clone()))
- .add_service(admin::make_admin_service(metasrv))
+ let mut router = tonic::transport::Server::builder().accept_http1(true); // for admin services
+ let router = add_compressed_service!(router, HeartbeatServer::from_arc(metasrv.clone()));
+ let router = add_compressed_service!(router, StoreServer::from_arc(metasrv.clone()));
+ let router = add_compressed_service!(router, ClusterServer::from_arc(metasrv.clone()));
+ let router = add_compressed_service!(router, ProcedureServiceServer::from_arc(metasrv.clone()));
+ router.add_service(admin::make_admin_service(metasrv))
}
pub async fn metasrv_builder(
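For readers skimming the macro: each add_compressed_service! invocation above is just the old add_service call with both encodings registered on the wrapped tonic service. For example, the heartbeat line expands conceptually to:

    router.add_service(
        HeartbeatServer::from_arc(metasrv.clone())
            .accept_compressed(CompressionEncoding::Gzip)
            .accept_compressed(CompressionEncoding::Zstd)
            .send_compressed(CompressionEncoding::Gzip)
            .send_compressed(CompressionEncoding::Zstd),
    )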
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index c84e1755cab7..0c5c34370400 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -38,6 +38,7 @@ pub mod selector;
pub mod service;
pub mod state;
pub mod table_meta_alloc;
+
pub use crate::error::Result;
mod greptimedb_telemetry;
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index 318fd16c87f2..4991c2ab8a4d 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -24,8 +24,10 @@ use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackendRef;
+use tonic::codec::CompressionEncoding;
use tower::service_fn;
+use crate::add_compressed_service;
use crate::metasrv::builder::MetasrvBuilder;
use crate::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
@@ -80,11 +82,14 @@ pub async fn mock(
let (client, server) = tokio::io::duplex(1024);
let metasrv = Arc::new(metasrv);
let service = metasrv.clone();
+
let _handle = tokio::spawn(async move {
- tonic::transport::Server::builder()
- .add_service(HeartbeatServer::from_arc(service.clone()))
- .add_service(StoreServer::from_arc(service.clone()))
- .add_service(ProcedureServiceServer::from_arc(service.clone()))
+ let mut router = tonic::transport::Server::builder();
+ let router = add_compressed_service!(router, HeartbeatServer::from_arc(service.clone()));
+ let router = add_compressed_service!(router, StoreServer::from_arc(service.clone()));
+ let router =
+ add_compressed_service!(router, ProcedureServiceServer::from_arc(service.clone()));
+ router
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
.await
});
|
feat
|
enable compression for metasrv client (#5078)
|
8d54d40b21622190fe1d039c2919a1f76de4ab6b
|
2023-05-15 14:59:28
|
Yingwen
|
feat: Add FlushPicker to flush regions periodically (#1559)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 607d2fac9b29..d8c300d45f9b 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -52,6 +52,15 @@ gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
+# Storage flush options
+[storage.flush]
+# Max inflight flush tasks.
+max_flush_tasks = 8
+# Default write buffer size for a region.
+region_write_buffer_size = "32MB"
+# Interval to auto flush a region if it has not flushed yet.
+auto_flush_interval = "1h"
+
# Procedure storage options, see `standalone.example.toml`.
[procedure]
max_retry_times = 3
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index b00ac213e649..25ec6867c049 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -117,6 +117,15 @@ gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
+# Storage flush options
+[storage.flush]
+# Max inflight flush tasks.
+max_flush_tasks = 8
+# Default write buffer size for a region.
+region_write_buffer_size = "32MB"
+# Interval to auto flush a region if it has not flushed yet.
+auto_flush_interval = "1h"
+
# Procedure storage options.
[procedure]
# Procedure max retry time.
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 12a7b81c0292..1d8c9649356e 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -509,7 +509,8 @@ mod tests {
Arc::new(NoopLogStore::default()),
object_store.clone(),
noop_compaction_scheduler,
- ),
+ )
+ .unwrap(),
object_store,
));
(dir, table_engine)
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 0f9d4e087c70..e0aa1ed21508 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -23,7 +23,10 @@ use secrecy::SecretString;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::Mode;
-use storage::config::EngineConfig as StorageEngineConfig;
+use storage::config::{
+ EngineConfig as StorageEngineConfig, DEFAULT_AUTO_FLUSH_INTERVAL, DEFAULT_MAX_FLUSH_TASKS,
+ DEFAULT_PICKER_SCHEDULE_INTERVAL, DEFAULT_REGION_WRITE_BUFFER_SIZE,
+};
use storage::scheduler::SchedulerConfig;
use crate::error::Result;
@@ -49,6 +52,7 @@ pub struct StorageConfig {
pub store: ObjectStoreConfig,
pub compaction: CompactionConfig,
pub manifest: RegionManifestConfig,
+ pub flush: FlushConfig,
}
#[derive(Debug, Clone, Serialize, Default, Deserialize)]
@@ -203,6 +207,34 @@ impl Default for CompactionConfig {
}
}
+#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
+#[serde(default)]
+pub struct FlushConfig {
+ /// Max inflight flush tasks.
+ pub max_flush_tasks: usize,
+ /// Default write buffer size for a region.
+ pub region_write_buffer_size: ReadableSize,
+ /// Interval to schedule auto flush picker to find region to flush.
+ #[serde(with = "humantime_serde")]
+ pub picker_schedule_interval: Duration,
+ /// Interval to auto flush a region if it has not flushed yet.
+ #[serde(with = "humantime_serde")]
+ pub auto_flush_interval: Duration,
+}
+
+impl Default for FlushConfig {
+ fn default() -> Self {
+ Self {
+ max_flush_tasks: DEFAULT_MAX_FLUSH_TASKS,
+ region_write_buffer_size: DEFAULT_REGION_WRITE_BUFFER_SIZE,
+ picker_schedule_interval: Duration::from_millis(
+ DEFAULT_PICKER_SCHEDULE_INTERVAL.into(),
+ ),
+ auto_flush_interval: Duration::from_millis(DEFAULT_AUTO_FLUSH_INTERVAL.into()),
+ }
+ }
+}
+
impl From<&DatanodeOptions> for SchedulerConfig {
fn from(value: &DatanodeOptions) -> Self {
Self {
@@ -220,6 +252,10 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
max_files_in_l0: value.storage.compaction.max_files_in_level0,
max_purge_tasks: value.storage.compaction.max_purge_tasks,
sst_write_buffer_size: value.storage.compaction.sst_write_buffer_size,
+ max_flush_tasks: value.storage.flush.max_flush_tasks,
+ region_write_buffer_size: value.storage.flush.region_write_buffer_size,
+ picker_schedule_interval: value.storage.flush.picker_schedule_interval,
+ auto_flush_interval: value.storage.flush.auto_flush_interval,
}
}
}
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 4433c2193b31..707d8c3232bf 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -120,7 +120,8 @@ impl Instance {
log_store.clone(),
object_store.clone(),
compaction_scheduler,
- ),
+ )
+ .unwrap(),
object_store.clone(),
));
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index d644d73f6b07..b163cf010a4c 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -669,6 +669,12 @@ impl<S: StorageEngine> MitoEngineInner<S> {
.map_err(BoxedError::new)
.context(table_error::TableOperationSnafu)?;
+ self.storage_engine
+ .close(&StorageEngineContext::default())
+ .await
+ .map_err(BoxedError::new)
+ .context(table_error::TableOperationSnafu)?;
+
Ok(())
}
}
diff --git a/src/mito/src/engine/procedure.rs b/src/mito/src/engine/procedure.rs
index 4d3a8b24d08b..a8627b61644d 100644
--- a/src/mito/src/engine/procedure.rs
+++ b/src/mito/src/engine/procedure.rs
@@ -66,7 +66,8 @@ mod procedure_test_util {
Arc::new(NoopLogStore::default()),
object_store.clone(),
compaction_scheduler,
- );
+ )
+ .unwrap();
let table_engine = MitoEngine::new(EngineConfig::default(), storage_engine, object_store);
TestEnv { table_engine, dir }
diff --git a/src/mito/src/engine/tests.rs b/src/mito/src/engine/tests.rs
index 7ff3fa32233a..72bdf0ccda8c 100644
--- a/src/mito/src/engine/tests.rs
+++ b/src/mito/src/engine/tests.rs
@@ -83,7 +83,8 @@ async fn setup_table_with_column_default_constraint() -> (TempDir, String, Table
Arc::new(NoopLogStore::default()),
object_store.clone(),
compaction_scheduler,
- ),
+ )
+ .unwrap(),
object_store,
);
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index bd655ea029ec..19d85eaf7cb0 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -35,8 +35,8 @@ use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
use store_api::storage::{
- AddColumn, AlterOperation, AlterRequest, ChunkReader, FlushContext, ReadContext, Region,
- RegionMeta, RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
+ AddColumn, AlterOperation, AlterRequest, ChunkReader, FlushContext, FlushReason, ReadContext,
+ Region, RegionMeta, RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
};
use table::error::{
InvalidTableSnafu, RegionSchemaMismatchSnafu, Result as TableResult, TableOperationSnafu,
@@ -294,7 +294,12 @@ impl<R: Region> Table for MitoTable<R> {
region_number: Option<RegionNumber>,
wait: Option<bool>,
) -> TableResult<()> {
- let flush_ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
+ let flush_ctx = wait
+ .map(|wait| FlushContext {
+ wait,
+ reason: FlushReason::Manually,
+ })
+ .unwrap_or_default();
if let Some(region_number) = region_number {
if let Some(region) = self.regions.get(&region_number) {
region
diff --git a/src/mito/src/table/test_util.rs b/src/mito/src/table/test_util.rs
index dcde46ba0370..adf0196302ab 100644
--- a/src/mito/src/table/test_util.rs
+++ b/src/mito/src/table/test_util.rs
@@ -155,7 +155,8 @@ pub async fn setup_test_engine_and_table() -> TestEngineComponents {
Arc::new(NoopLogStore::default()),
object_store.clone(),
compaction_scheduler,
- );
+ )
+ .unwrap();
let table_engine = MitoEngine::new(
EngineConfig::default(),
storage_engine.clone(),
diff --git a/src/mito/src/table/test_util/mock_engine.rs b/src/mito/src/table/test_util/mock_engine.rs
index 3b0b7f8ebebc..a1dcedd7a222 100644
--- a/src/mito/src/table/test_util/mock_engine.rs
+++ b/src/mito/src/table/test_util/mock_engine.rs
@@ -326,4 +326,8 @@ impl StorageEngine for MockEngine {
let regions = self.regions.lock().unwrap();
Ok(regions.opened_regions.get(name).cloned())
}
+
+ async fn close(&self, _ctx: &EngineContext) -> Result<()> {
+ Ok(())
+ }
}
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index fdb2f2f17791..771fd295f608 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -160,7 +160,8 @@ mod tests {
Arc::new(log_store),
object_store.clone(),
compaction_scheduler,
- ),
+ )
+ .unwrap(),
object_store,
));
let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
diff --git a/src/storage/src/config.rs b/src/storage/src/config.rs
index bffcc4ffbaff..f1bc18164acc 100644
--- a/src/storage/src/config.rs
+++ b/src/storage/src/config.rs
@@ -18,6 +18,15 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
+/// Default max flush tasks.
+pub const DEFAULT_MAX_FLUSH_TASKS: usize = 8;
+/// Default region write buffer size.
+pub const DEFAULT_REGION_WRITE_BUFFER_SIZE: ReadableSize = ReadableSize::mb(32);
+/// Default interval to trigger auto flush in millis.
+pub const DEFAULT_AUTO_FLUSH_INTERVAL: u32 = 60 * 60 * 1000;
+/// Default interval to schedule the picker to flush automatically in millis.
+pub const DEFAULT_PICKER_SCHEDULE_INTERVAL: u32 = 5 * 60 * 1000;
+
#[derive(Debug, Clone)]
pub struct EngineConfig {
pub manifest_checkpoint_on_startup: bool,
@@ -26,6 +35,14 @@ pub struct EngineConfig {
pub max_files_in_l0: usize,
pub max_purge_tasks: usize,
pub sst_write_buffer_size: ReadableSize,
+ /// Max inflight flush tasks.
+ pub max_flush_tasks: usize,
+ /// Default write buffer size for a region.
+ pub region_write_buffer_size: ReadableSize,
+ /// Interval to schedule the auto flush picker.
+ pub picker_schedule_interval: Duration,
+ /// Interval to auto flush a region if it has not flushed yet.
+ pub auto_flush_interval: Duration,
}
impl Default for EngineConfig {
@@ -37,6 +54,12 @@ impl Default for EngineConfig {
max_files_in_l0: 8,
max_purge_tasks: 32,
sst_write_buffer_size: ReadableSize::mb(8),
+ max_flush_tasks: DEFAULT_MAX_FLUSH_TASKS,
+ region_write_buffer_size: DEFAULT_REGION_WRITE_BUFFER_SIZE,
+ picker_schedule_interval: Duration::from_millis(
+ DEFAULT_PICKER_SCHEDULE_INTERVAL.into(),
+ ),
+ auto_flush_interval: Duration::from_millis(DEFAULT_AUTO_FLUSH_INTERVAL.into()),
}
}
}
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index c85984500f01..9d9e81beddb4 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -17,7 +17,7 @@ use std::sync::{Arc, RwLock};
use std::time::Duration;
use async_trait::async_trait;
-use common_telemetry::logging::debug;
+use common_telemetry::logging::{self, debug};
use object_store::{util, ObjectStore};
use snafu::ResultExt;
use store_api::logstore::LogStore;
@@ -30,12 +30,14 @@ use crate::compaction::CompactionSchedulerRef;
use crate::config::EngineConfig;
use crate::error::{self, Error, Result};
use crate::file_purger::{FilePurgeHandler, FilePurgerRef};
-use crate::flush::{FlushScheduler, FlushSchedulerRef, FlushStrategyRef, SizeBasedStrategy};
+use crate::flush::{
+ FlushScheduler, FlushSchedulerRef, FlushStrategyRef, PickerConfig, SizeBasedStrategy,
+};
use crate::manifest::region::RegionManifest;
use crate::memtable::{DefaultMemtableBuilder, MemtableBuilderRef};
use crate::metadata::RegionMetadata;
use crate::region::{RegionImpl, StoreConfig};
-use crate::scheduler::{LocalScheduler, SchedulerConfig};
+use crate::scheduler::{LocalScheduler, Scheduler, SchedulerConfig};
use crate::sst::FsAccessLayer;
/// [StorageEngine] implementation.
@@ -85,6 +87,16 @@ impl<S: LogStore> StorageEngine for EngineImpl<S> {
fn get_region(&self, _ctx: &EngineContext, name: &str) -> Result<Option<Self::Region>> {
Ok(self.inner.get_region(name))
}
+
+ async fn close(&self, _ctx: &EngineContext) -> Result<()> {
+ logging::info!("Stopping storage engine");
+
+ self.inner.close().await?;
+
+ logging::info!("Storage engine stopped");
+
+ Ok(())
+ }
}
impl<S: LogStore> EngineImpl<S> {
@@ -93,15 +105,15 @@ impl<S: LogStore> EngineImpl<S> {
log_store: Arc<S>,
object_store: ObjectStore,
compaction_scheduler: CompactionSchedulerRef<S>,
- ) -> Self {
- Self {
+ ) -> Result<Self> {
+ Ok(Self {
inner: Arc::new(EngineInner::new(
config,
log_store,
object_store,
compaction_scheduler,
- )),
- }
+ )?),
+ })
}
}
@@ -124,7 +136,7 @@ pub fn region_manifest_dir(parent_dir: &str, region_name: &str) -> String {
/// Also used as a placeholder in the region map when the region isn't ready, e.g. during
/// creating/opening.
#[derive(Debug)]
-enum RegionSlot<S: LogStore> {
+pub(crate) enum RegionSlot<S: LogStore> {
/// The region is during creation.
Creating,
/// The region is during opening.
@@ -208,17 +220,21 @@ impl<'a, S: LogStore> Drop for SlotGuard<'a, S> {
}
/// Region slot map.
-struct RegionMap<S: LogStore>(RwLock<HashMap<String, RegionSlot<S>>>);
+pub struct RegionMap<S: LogStore>(RwLock<HashMap<String, RegionSlot<S>>>);
impl<S: LogStore> RegionMap<S> {
/// Returns a new region map.
- fn new() -> RegionMap<S> {
+ pub fn new() -> RegionMap<S> {
RegionMap(RwLock::new(HashMap::new()))
}
/// Returns the `Some(slot)` if there is existing slot with given `name`, or insert
/// given `slot` and returns `None`.
- fn get_or_occupy_slot(&self, name: &str, slot: RegionSlot<S>) -> Option<RegionSlot<S>> {
+ pub(crate) fn get_or_occupy_slot(
+ &self,
+ name: &str,
+ slot: RegionSlot<S>,
+ ) -> Option<RegionSlot<S>> {
{
// Try to get the region under read lock.
let regions = self.0.read().unwrap();
@@ -258,6 +274,26 @@ impl<S: LogStore> RegionMap<S> {
let mut regions = self.0.write().unwrap();
regions.remove(name);
}
+
+ /// Collects regions.
+ pub(crate) fn list_regions(&self) -> Vec<RegionImpl<S>> {
+ let regions = self.0.read().unwrap();
+ regions
+ .values()
+ .filter_map(|slot| slot.get_ready_region())
+ .collect()
+ }
+
+ /// Clear the region map.
+ fn clear(&self) {
+ self.0.write().unwrap().clear();
+ }
+}
+
+impl<S: LogStore> Default for RegionMap<S> {
+ fn default() -> Self {
+ Self::new()
+ }
}
struct EngineInner<S: LogStore> {
@@ -278,12 +314,19 @@ impl<S: LogStore> EngineInner<S> {
log_store: Arc<S>,
object_store: ObjectStore,
compaction_scheduler: CompactionSchedulerRef<S>,
- ) -> Self {
- // TODO(yingwen): max inflight flush tasks.
+ ) -> Result<Self> {
+ let regions = Arc::new(RegionMap::new());
let flush_scheduler = Arc::new(FlushScheduler::new(
- SchedulerConfig::default(),
+ SchedulerConfig {
+ max_inflight_tasks: config.max_flush_tasks,
+ },
compaction_scheduler.clone(),
- ));
+ regions.clone(),
+ PickerConfig {
+ schedule_interval: config.picker_schedule_interval,
+ auto_flush_interval: config.auto_flush_interval,
+ },
+ )?);
let file_purger = Arc::new(LocalScheduler::new(
SchedulerConfig {
@@ -291,17 +334,19 @@ impl<S: LogStore> EngineInner<S> {
},
FilePurgeHandler,
));
- Self {
+ Ok(Self {
object_store,
log_store,
- regions: Arc::new(RegionMap::new()),
+ regions,
memtable_builder: Arc::new(DefaultMemtableBuilder::default()),
flush_scheduler,
- flush_strategy: Arc::new(SizeBasedStrategy::default()),
+ flush_strategy: Arc::new(SizeBasedStrategy::new(
+ config.region_write_buffer_size.as_bytes() as usize,
+ )),
compaction_scheduler,
file_purger,
config: Arc::new(config),
- }
+ })
}
async fn open_region(&self, name: &str, opts: &OpenOptions) -> Result<Option<RegionImpl<S>>> {
@@ -427,6 +472,22 @@ impl<S: LogStore> EngineInner<S> {
compaction_time_window,
})
}
+
+ async fn close(&self) -> Result<()> {
+ let regions = self.regions.list_regions();
+ for region in regions {
+ // Tolerate failure during closing regions.
+ if let Err(e) = region.close().await {
+ logging::error!(e; "Failed to close region {}", region.id());
+ }
+ }
+ // Clear regions to release references to regions in the region map.
+ self.regions.clear();
+
+ self.compaction_scheduler.stop(true).await?;
+ self.flush_scheduler.stop().await?;
+ self.file_purger.stop(true).await
+ }
}
#[cfg(test)]
@@ -462,7 +523,8 @@ mod tests {
Arc::new(log_store),
object_store,
compaction_scheduler,
- );
+ )
+ .unwrap();
let region_name = "region-0";
let desc = RegionDescBuilder::new(region_name)
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 692b6bf3466c..786de859bb6f 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -487,6 +487,18 @@ pub enum Error {
sequence: SequenceNumber,
location: Location,
},
+
+ #[snafu(display("Failed to start picking task for flush: {}", source))]
+ StartPickTask {
+ #[snafu(backtrace)]
+ source: RuntimeError,
+ },
+
+ #[snafu(display("Failed to stop picking task for flush: {}", source))]
+ StopPickTask {
+ #[snafu(backtrace)]
+ source: RuntimeError,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -579,7 +591,9 @@ impl ErrorExt for Error {
StartManifestGcTask { .. }
| StopManifestGcTask { .. }
| IllegalSchedulerState { .. }
- | DuplicateFlush { .. } => StatusCode::Unexpected,
+ | DuplicateFlush { .. }
+ | StartPickTask { .. }
+ | StopPickTask { .. } => StatusCode::Unexpected,
TtlCalculation { source, .. } => source.status_code(),
}
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index 2a212fe430a9..20feecdcaac9 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -12,17 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod picker;
mod scheduler;
use std::sync::Arc;
use common_telemetry::{logging, timer};
+pub use picker::{FlushPicker, PickerConfig};
pub use scheduler::{FlushHandle, FlushRequest, FlushScheduler, FlushSchedulerRef};
use store_api::logstore::LogStore;
use store_api::storage::consts::WRITE_ROW_GROUP_SIZE;
use store_api::storage::SequenceNumber;
-use crate::config::EngineConfig;
+use crate::config::{EngineConfig, DEFAULT_REGION_WRITE_BUFFER_SIZE};
use crate::error::Result;
use crate::manifest::action::*;
use crate::manifest::region::RegionManifest;
@@ -32,9 +34,6 @@ use crate::region::{RegionWriterRef, SharedDataRef};
use crate::sst::{AccessLayerRef, FileId, FileMeta, Source, SstInfo, WriteOptions};
use crate::wal::Wal;
-/// Default write buffer size (32M).
-const DEFAULT_WRITE_BUFFER_SIZE: usize = 32 * 1024 * 1024;
-
pub trait FlushStrategy: Send + Sync + std::fmt::Debug {
fn should_flush(
&self,
@@ -72,7 +71,7 @@ fn get_mutable_limitation(max_write_buffer_size: usize) -> usize {
impl Default for SizeBasedStrategy {
fn default() -> Self {
- let max_write_buffer_size = DEFAULT_WRITE_BUFFER_SIZE;
+ let max_write_buffer_size = DEFAULT_REGION_WRITE_BUFFER_SIZE.as_bytes() as usize;
Self {
max_write_buffer_size,
mutable_limitation: get_mutable_limitation(max_write_buffer_size),
diff --git a/src/storage/src/flush/picker.rs b/src/storage/src/flush/picker.rs
new file mode 100644
index 000000000000..7c0726f403e6
--- /dev/null
+++ b/src/storage/src/flush/picker.rs
@@ -0,0 +1,201 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use async_trait::async_trait;
+use common_telemetry::logging;
+use common_time::util;
+use store_api::logstore::LogStore;
+use store_api::storage::{FlushContext, FlushReason, Region};
+
+use crate::config::{DEFAULT_AUTO_FLUSH_INTERVAL, DEFAULT_PICKER_SCHEDULE_INTERVAL};
+use crate::region::RegionImpl;
+
+/// Config for [FlushPicker].
+pub struct PickerConfig {
+ /// Interval to schedule the picker.
+ pub schedule_interval: Duration,
+ /// Interval to auto flush a region if it has not flushed yet.
+ pub auto_flush_interval: Duration,
+}
+
+impl PickerConfig {
+ /// Returns the auto flush interval in millis or a default value
+ /// if overflow occurs.
+ fn auto_flush_interval_millis(&self) -> i64 {
+ self.auto_flush_interval
+ .as_millis()
+ .try_into()
+ .unwrap_or(DEFAULT_AUTO_FLUSH_INTERVAL.into())
+ }
+}
+
+impl Default for PickerConfig {
+ fn default() -> Self {
+ PickerConfig {
+ schedule_interval: Duration::from_millis(DEFAULT_PICKER_SCHEDULE_INTERVAL.into()),
+ auto_flush_interval: Duration::from_millis(DEFAULT_AUTO_FLUSH_INTERVAL.into()),
+ }
+ }
+}
+
+/// Flush task picker.
+pub struct FlushPicker {
+ /// Interval to flush a region automatically.
+ auto_flush_interval_millis: i64,
+}
+
+impl FlushPicker {
+ /// Returns a new FlushPicker.
+ pub fn new(config: PickerConfig) -> FlushPicker {
+ FlushPicker {
+ auto_flush_interval_millis: config.auto_flush_interval_millis(),
+ }
+ }
+
+ /// Pick regions and flush them by interval.
+ ///
+ /// Returns the number of flushed regions.
+ pub async fn pick_by_interval<T: FlushItem>(&self, regions: &[T]) -> usize {
+ let now = util::current_time_millis();
+ // Flush regions by interval.
+ if let Some(earliest_flush_millis) = now.checked_sub(self.auto_flush_interval_millis) {
+ flush_regions_by_interval(regions, earliest_flush_millis).await
+ } else {
+ 0
+ }
+ }
+}
+
+/// Item for picker to flush.
+#[async_trait]
+pub trait FlushItem {
+ /// Id of the item.
+ fn item_id(&self) -> u64;
+
+ /// Last flush time in millis.
+ fn last_flush_time(&self) -> i64;
+
+ /// Requests the item to schedule a flush for specific `reason`.
+ ///
+ /// The flush job itself should run in background.
+ async fn request_flush(&self, reason: FlushReason);
+}
+
+#[async_trait]
+impl<S: LogStore> FlushItem for RegionImpl<S> {
+ fn item_id(&self) -> u64 {
+ self.id()
+ }
+
+ fn last_flush_time(&self) -> i64 {
+ self.last_flush_millis()
+ }
+
+ async fn request_flush(&self, reason: FlushReason) {
+ let ctx = FlushContext {
+ wait: false,
+ reason,
+ };
+ if let Err(e) = self.flush(&ctx).await {
+ logging::error!(e; "Failed to flush region {}", self.id());
+ }
+ }
+}
+
+/// Auto flush regions based on last flush time.
+///
+/// Returns the number of flushed regions.
+async fn flush_regions_by_interval<T: FlushItem>(
+ regions: &[T],
+ earliest_flush_millis: i64,
+) -> usize {
+ let mut flushed = 0;
+ for region in regions {
+ if region.last_flush_time() < earliest_flush_millis {
+ logging::debug!(
+ "Auto flush region {} due to last flush time ({} < {})",
+ region.item_id(),
+ region.last_flush_time(),
+ earliest_flush_millis,
+ );
+
+ flushed += 1;
+ region.request_flush(FlushReason::Periodically).await;
+ }
+ }
+
+ flushed
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Mutex;
+
+ use super::*;
+
+ struct MockItem {
+ id: u64,
+ last_flush_time: i64,
+ flush_reason: Mutex<Option<FlushReason>>,
+ }
+
+ impl MockItem {
+ fn new(id: u64, last_flush_time: i64) -> MockItem {
+ MockItem {
+ id,
+ last_flush_time,
+ flush_reason: Mutex::new(None),
+ }
+ }
+
+ fn flush_reason(&self) -> Option<FlushReason> {
+ *self.flush_reason.lock().unwrap()
+ }
+ }
+
+ #[async_trait]
+ impl FlushItem for MockItem {
+ fn item_id(&self) -> u64 {
+ self.id
+ }
+
+ fn last_flush_time(&self) -> i64 {
+ self.last_flush_time
+ }
+
+ async fn request_flush(&self, reason: FlushReason) {
+ let mut flush_reason = self.flush_reason.lock().unwrap();
+ *flush_reason = Some(reason);
+ }
+ }
+
+ #[tokio::test]
+ async fn test_pick_by_interval() {
+ let regions = [
+ MockItem::new(0, util::current_time_millis()),
+ MockItem::new(1, util::current_time_millis() - 60 * 1000),
+ ];
+ let picker = FlushPicker::new(PickerConfig {
+ // schedule_interval is unused in this test.
+ schedule_interval: Duration::from_millis(10),
+ auto_flush_interval: Duration::from_millis(30 * 1000),
+ });
+ let flushed = picker.pick_by_interval(&regions).await;
+ assert_eq!(1, flushed);
+ assert!(regions[0].flush_reason().is_none());
+ assert_eq!(Some(FlushReason::Periodically), regions[1].flush_reason());
+ }
+}
diff --git a/src/storage/src/flush/scheduler.rs b/src/storage/src/flush/scheduler.rs
index 9eadd633339d..cfe08cc4e261 100644
--- a/src/storage/src/flush/scheduler.rs
+++ b/src/storage/src/flush/scheduler.rs
@@ -15,6 +15,8 @@
use std::sync::Arc;
use std::time::Duration;
+use async_trait::async_trait;
+use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::logging;
use metrics::increment_counter;
use snafu::{ensure, ResultExt};
@@ -25,11 +27,14 @@ use tokio::sync::{oneshot, Notify};
use crate::compaction::{CompactionRequestImpl, CompactionSchedulerRef};
use crate::config::EngineConfig;
-use crate::error::{DuplicateFlushSnafu, Result, WaitFlushSnafu};
-use crate::flush::FlushJob;
+use crate::engine::RegionMap;
+use crate::error::{
+ DuplicateFlushSnafu, Error, Result, StartPickTaskSnafu, StopPickTaskSnafu, WaitFlushSnafu,
+};
+use crate::flush::{FlushJob, FlushPicker, PickerConfig};
use crate::manifest::region::RegionManifest;
use crate::memtable::{MemtableId, MemtableRef};
-use crate::metrics::{FLUSH_ERRORS_TOTAL, FLUSH_REQUESTS_TOTAL};
+use crate::metrics::FLUSH_ERRORS_TOTAL;
use crate::region;
use crate::region::{RegionWriterRef, SharedDataRef};
use crate::scheduler::rate_limit::BoxedRateLimitToken;
@@ -145,20 +150,37 @@ impl FlushHandle {
/// Flush scheduler.
pub struct FlushScheduler<S: LogStore> {
+ /// Flush task scheduler.
scheduler: LocalScheduler<FlushRequest<S>>,
+ /// Auto flush task.
+ auto_flush_task: RepeatedTask<Error>,
}
pub type FlushSchedulerRef<S> = Arc<FlushScheduler<S>>;
impl<S: LogStore> FlushScheduler<S> {
/// Returns a new [FlushScheduler].
- pub fn new(config: SchedulerConfig, compaction_scheduler: CompactionSchedulerRef<S>) -> Self {
+ pub fn new(
+ config: SchedulerConfig,
+ compaction_scheduler: CompactionSchedulerRef<S>,
+ regions: Arc<RegionMap<S>>,
+ picker_config: PickerConfig,
+ ) -> Result<Self> {
let handler = FlushHandler {
compaction_scheduler,
};
- Self {
+ let task_interval = picker_config.schedule_interval;
+ let picker = FlushPicker::new(picker_config);
+ let task_fn = AutoFlushFunction { regions, picker };
+ let auto_flush_task = RepeatedTask::new(task_interval, Box::new(task_fn));
+ auto_flush_task
+ .start(common_runtime::bg_runtime())
+ .context(StartPickTaskSnafu)?;
+
+ Ok(Self {
scheduler: LocalScheduler::new(config, handler),
- }
+ auto_flush_task,
+ })
}
/// Schedules a flush request and return the handle to the flush task.
@@ -183,13 +205,22 @@ impl<S: LogStore> FlushScheduler<S> {
}
);
- increment_counter!(FLUSH_REQUESTS_TOTAL);
-
Ok(FlushHandle {
region_id,
receiver,
})
}
+
+ /// Stop the scheduler.
+ pub async fn stop(&self) -> Result<()> {
+ self.auto_flush_task
+ .stop()
+ .await
+ .context(StopPickTaskSnafu)?;
+ self.scheduler.stop(true).await?;
+
+ Ok(())
+ }
}
struct FlushHandler<S: LogStore> {
@@ -235,6 +266,9 @@ async fn execute_flush<S: LogStore>(
} else {
logging::debug!("Successfully flush region: {}", req.region_id());
+ // Update last flush time.
+ req.shared.update_flush_millis();
+
let compaction_request = CompactionRequestImpl::from(&req);
let max_files_in_l0 = req.engine_config.max_files_in_l0;
let shared_data = req.shared.clone();
@@ -247,6 +281,29 @@ async fn execute_flush<S: LogStore>(
max_files_in_l0,
);
+ // Complete the request.
req.complete(Ok(()));
}
}
+
+/// Task function to pick regions to flush.
+struct AutoFlushFunction<S: LogStore> {
+ /// Regions of the engine.
+ regions: Arc<RegionMap<S>>,
+ picker: FlushPicker,
+}
+
+#[async_trait]
+impl<S: LogStore> TaskFunction<Error> for AutoFlushFunction<S> {
+ async fn call(&mut self) -> Result<()> {
+ // Get all regions.
+ let regions = self.regions.list_regions();
+ self.picker.pick_by_interval(&regions).await;
+
+ Ok(())
+ }
+
+ fn name(&self) -> &str {
+ "FlushPicker-pick-task"
+ }
+}
diff --git a/src/storage/src/metrics.rs b/src/storage/src/metrics.rs
index 640c6b5914ce..9a3926582336 100644
--- a/src/storage/src/metrics.rs
+++ b/src/storage/src/metrics.rs
@@ -22,6 +22,8 @@ pub const FLUSH_REQUESTS_TOTAL: &str = "storage.flush.requests_total";
pub const FLUSH_ERRORS_TOTAL: &str = "storage.flush.errors_total";
/// Elapsed time of a flush job.
pub const FLUSH_ELAPSED: &str = "storage.flush.elapsed";
+/// Reason to flush.
+pub const FLUSH_REASON: &str = "reason";
/// Gauge for open regions
pub const REGION_COUNT: &str = "storage.region_count";
/// Timer for logstore write
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 69ba0dfd8fa9..461ec3a0c67a 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -18,11 +18,13 @@ mod writer;
use std::collections::BTreeMap;
use std::fmt;
+use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use common_telemetry::logging;
+use common_time::util;
use metrics::{decrement_gauge, increment_gauge};
use snafu::ResultExt;
use store_api::logstore::LogStore;
@@ -235,6 +237,7 @@ impl<S: LogStore> RegionImpl<S> {
id,
name,
version_control: Arc::new(version_control),
+ last_flush_millis: AtomicI64::new(0),
}),
writer: Arc::new(RegionWriter::new(
store_config.memtable_builder,
@@ -315,6 +318,7 @@ impl<S: LogStore> RegionImpl<S> {
id: metadata.id(),
name,
version_control,
+ last_flush_millis: AtomicI64::new(0),
});
let compaction_time_window = store_config
.compaction_time_window
@@ -366,6 +370,11 @@ impl<S: LogStore> RegionImpl<S> {
self.inner.shared.id()
}
+ /// Returns last flush timestamp in millis.
+ pub fn last_flush_millis(&self) -> i64 {
+ self.inner.shared.last_flush_millis()
+ }
+
fn create_version_with_checkpoint(
checkpoint: RegionCheckpoint,
memtable_builder: &MemtableBuilderRef,
@@ -558,6 +567,9 @@ pub struct SharedData {
name: String,
// TODO(yingwen): Maybe no need to use Arc for version control.
pub version_control: VersionControlRef,
+
+ /// Last flush time in millis.
+ last_flush_millis: AtomicI64,
}
impl SharedData {
@@ -570,6 +582,17 @@ impl SharedData {
pub fn name(&self) -> &str {
&self.name
}
+
+ /// Update flush time to current time.
+ pub(crate) fn update_flush_millis(&self) {
+ let now = util::current_time_millis();
+ self.last_flush_millis.store(now, Ordering::Relaxed);
+ }
+
+ /// Returns last flush timestamp in millis.
+ fn last_flush_millis(&self) -> i64 {
+ self.last_flush_millis.load(Ordering::Relaxed)
+ }
}
pub type SharedDataRef = Arc<SharedData>;
diff --git a/src/storage/src/region/tests/alter.rs b/src/storage/src/region/tests/alter.rs
index 90c483bbf125..cfaab4e9e62d 100644
--- a/src/storage/src/region/tests/alter.rs
+++ b/src/storage/src/region/tests/alter.rs
@@ -22,8 +22,8 @@ use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
use log_store::raft_engine::log_store::RaftEngineLogStore;
use store_api::storage::{
AddColumn, AlterOperation, AlterRequest, Chunk, ChunkReader, ColumnDescriptor,
- ColumnDescriptorBuilder, ColumnId, FlushContext, Region, RegionMeta, ScanRequest, SchemaRef,
- Snapshot, WriteRequest, WriteResponse,
+ ColumnDescriptorBuilder, ColumnId, FlushContext, FlushReason, Region, RegionMeta, ScanRequest,
+ SchemaRef, Snapshot, WriteRequest, WriteResponse,
};
use crate::region::tests::{self, FileTesterBase};
@@ -118,7 +118,12 @@ impl AlterTester {
}
async fn flush(&self, wait: Option<bool>) {
- let ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
+ let ctx = wait
+ .map(|wait| FlushContext {
+ wait,
+ reason: FlushReason::Manually,
+ })
+ .unwrap_or_default();
self.base().region.flush(&ctx).await.unwrap();
}
diff --git a/src/storage/src/region/tests/compact.rs b/src/storage/src/region/tests/compact.rs
index a283b3d32c44..a7416f7aa66c 100644
--- a/src/storage/src/region/tests/compact.rs
+++ b/src/storage/src/region/tests/compact.rs
@@ -23,7 +23,7 @@ use common_test_util::temp_dir::create_temp_dir;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use object_store::services::{Fs, S3};
use object_store::ObjectStore;
-use store_api::storage::{FlushContext, Region, WriteResponse};
+use store_api::storage::{FlushContext, FlushReason, Region, WriteResponse};
use tokio::sync::Notify;
use crate::compaction::{CompactionHandler, SimplePicker};
@@ -191,7 +191,12 @@ impl CompactionTester {
}
async fn flush(&self, wait: Option<bool>) {
- let ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
+ let ctx = wait
+ .map(|wait| FlushContext {
+ wait,
+ reason: FlushReason::Manually,
+ })
+ .unwrap_or_default();
self.base().region.flush(&ctx).await.unwrap();
}
diff --git a/src/storage/src/region/tests/flush.rs b/src/storage/src/region/tests/flush.rs
index 75141e361f4b..4033b3d5385a 100644
--- a/src/storage/src/region/tests/flush.rs
+++ b/src/storage/src/region/tests/flush.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use common_test_util::temp_dir::create_temp_dir;
use log_store::raft_engine::log_store::RaftEngineLogStore;
-use store_api::storage::{FlushContext, OpenOptions, Region, WriteResponse};
+use store_api::storage::{FlushContext, FlushReason, OpenOptions, Region, WriteResponse};
use crate::engine;
use crate::flush::FlushStrategyRef;
@@ -91,7 +91,12 @@ impl FlushTester {
}
async fn flush(&self, wait: Option<bool>) {
- let ctx = wait.map(|wait| FlushContext { wait }).unwrap_or_default();
+ let ctx = wait
+ .map(|wait| FlushContext {
+ wait,
+ reason: FlushReason::Manually,
+ })
+ .unwrap_or_default();
self.base().region.flush(&ctx).await.unwrap();
}
}
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index 740c11305a50..991a11a571de 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -18,10 +18,13 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_telemetry::logging;
use futures::TryStreamExt;
+use metrics::increment_counter;
use snafu::{ensure, ResultExt};
use store_api::logstore::LogStore;
use store_api::manifest::{Manifest, ManifestVersion, MetaAction};
-use store_api::storage::{AlterRequest, FlushContext, SequenceNumber, WriteContext, WriteResponse};
+use store_api::storage::{
+ AlterRequest, FlushContext, FlushReason, SequenceNumber, WriteContext, WriteResponse,
+};
use tokio::sync::{oneshot, Mutex};
use crate::compaction::{CompactionRequestImpl, CompactionSchedulerRef};
@@ -33,6 +36,7 @@ use crate::manifest::action::{
};
use crate::memtable::{Inserter, MemtableBuilderRef, MemtableId, MemtableRef};
use crate::metadata::RegionMetadataRef;
+use crate::metrics::{FLUSH_REASON, FLUSH_REQUESTS_TOTAL};
use crate::proto::wal::WalHeader;
use crate::region::{
CompactContext, RecoverdMetadata, RecoveredMetadataMap, RegionManifest, SharedDataRef,
@@ -281,7 +285,7 @@ impl RegionWriter {
ensure!(!inner.is_closed(), error::ClosedRegionSnafu);
- inner.manual_flush(writer_ctx).await?;
+ inner.manual_flush(writer_ctx, ctx.reason).await?;
if ctx.wait {
if let Some(handle) = inner.flush_handle.take() {
@@ -587,7 +591,8 @@ impl WriterInner {
version_control,
writer_ctx.flush_strategy,
) {
- self.trigger_flush(writer_ctx).await?;
+ self.trigger_flush(writer_ctx, FlushReason::MemtableFull)
+ .await?;
}
Ok(())
@@ -612,12 +617,18 @@ impl WriterInner {
flush_strategy.should_flush(shared, mutable_bytes_allocated, total_bytes_allocated)
}
- async fn trigger_flush<S: LogStore>(&mut self, ctx: &WriterContext<'_, S>) -> Result<()> {
+ async fn trigger_flush<S: LogStore>(
+ &mut self,
+ ctx: &WriterContext<'_, S>,
+ reason: FlushReason,
+ ) -> Result<()> {
let version_control = &ctx.shared.version_control;
let new_mutable = self.alloc_memtable(version_control);
// Freeze all mutable memtables so we can flush them later.
version_control.freeze_mutable(new_mutable);
+ increment_counter!(FLUSH_REQUESTS_TOTAL, FLUSH_REASON => reason.as_str());
+
if let Some(flush_handle) = self.flush_handle.take() {
// Previous flush job is incomplete, wait util it is finished.
// However the last flush job may fail, in which case, we just return error
@@ -719,8 +730,12 @@ impl WriterInner {
Ok(())
}
- async fn manual_flush<S: LogStore>(&mut self, writer_ctx: WriterContext<'_, S>) -> Result<()> {
- self.trigger_flush(&writer_ctx).await?;
+ async fn manual_flush<S: LogStore>(
+ &mut self,
+ writer_ctx: WriterContext<'_, S>,
+ reason: FlushReason,
+ ) -> Result<()> {
+ self.trigger_flush(&writer_ctx, reason).await?;
Ok(())
}
diff --git a/src/storage/src/scheduler.rs b/src/storage/src/scheduler.rs
index 33249fbe28f1..2c8d1c162ad3 100644
--- a/src/storage/src/scheduler.rs
+++ b/src/storage/src/scheduler.rs
@@ -124,6 +124,9 @@ where
self.state.store(STATE_STOP, Ordering::Relaxed);
self.cancel_token.cancel();
+
+ // Clear all requests
+ self.request_queue.write().unwrap().clear();
}
}
diff --git a/src/storage/src/scheduler/dedup_deque.rs b/src/storage/src/scheduler/dedup_deque.rs
index e04c16dcc569..91cef075aeba 100644
--- a/src/storage/src/scheduler/dedup_deque.rs
+++ b/src/storage/src/scheduler/dedup_deque.rs
@@ -76,6 +76,12 @@ impl<K: Eq + Hash + Clone, V> DedupDeque<K, V> {
pub fn is_empty(&self) -> bool {
self.deque.is_empty()
}
+
+ #[inline]
+ pub fn clear(&mut self) {
+ self.deque.clear();
+ self.existing.clear();
+ }
}
impl<K, V> Debug for DedupDeque<K, V>
@@ -111,5 +117,8 @@ mod tests {
assert!(deque.push_back(1, "hello".to_string()));
assert!(!deque.push_back(1, "world".to_string()));
assert_eq!((1, "hello".to_string()), deque.pop_front().unwrap());
+
+ deque.clear();
+ assert!(deque.is_empty());
}
}
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index b969fe460859..ae418b56ee76 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -21,9 +21,9 @@ use object_store::ObjectStore;
use store_api::manifest::Manifest;
use crate::compaction::noop::NoopCompactionScheduler;
-use crate::engine;
+use crate::engine::{self, RegionMap};
use crate::file_purger::noop::NoopFilePurgeHandler;
-use crate::flush::{FlushScheduler, SizeBasedStrategy};
+use crate::flush::{FlushScheduler, PickerConfig, SizeBasedStrategy};
use crate::manifest::region::RegionManifest;
use crate::memtable::DefaultMemtableBuilder;
use crate::region::StoreConfig;
@@ -65,10 +65,16 @@ pub async fn new_store_config_with_object_store(
};
let log_store = Arc::new(RaftEngineLogStore::try_new(log_config).await.unwrap());
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
- let flush_scheduler = Arc::new(FlushScheduler::new(
- SchedulerConfig::default(),
- compaction_scheduler.clone(),
- ));
+ // We use an empty region map so actually the background worker of the picker is disabled.
+ let flush_scheduler = Arc::new(
+ FlushScheduler::new(
+ SchedulerConfig::default(),
+ compaction_scheduler.clone(),
+ Arc::new(RegionMap::new()),
+ PickerConfig::default(),
+ )
+ .unwrap(),
+ );
let file_purger = Arc::new(LocalScheduler::new(
SchedulerConfig::default(),
NoopFilePurgeHandler,
diff --git a/src/storage/src/test_util/flush_switch.rs b/src/storage/src/test_util/flush_switch.rs
index 6d1b7df9cce8..3b5b2e10c1e4 100644
--- a/src/storage/src/test_util/flush_switch.rs
+++ b/src/storage/src/test_util/flush_switch.rs
@@ -17,6 +17,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
use crate::flush::FlushStrategy;
use crate::region::SharedDataRef;
+/// Controls whether to flush a region while writing the region.
+/// Disable flush by default.
#[derive(Debug, Default)]
pub struct FlushSwitch {
should_flush: AtomicBool,
diff --git a/src/store-api/src/storage.rs b/src/store-api/src/storage.rs
index aba97a3e3b7f..d8978db21f48 100644
--- a/src/store-api/src/storage.rs
+++ b/src/store-api/src/storage.rs
@@ -34,7 +34,7 @@ pub use self::chunk::{Chunk, ChunkReader};
pub use self::descriptors::*;
pub use self::engine::{CreateOptions, EngineContext, OpenOptions, StorageEngine};
pub use self::metadata::RegionMeta;
-pub use self::region::{FlushContext, Region, RegionStat, WriteContext};
+pub use self::region::{FlushContext, FlushReason, Region, RegionStat, WriteContext};
pub use self::requests::{
AddColumn, AlterOperation, AlterRequest, GetRequest, ScanRequest, WriteRequest,
};
diff --git a/src/store-api/src/storage/engine.rs b/src/store-api/src/storage/engine.rs
index 78bc5fc6310c..fb055fa5b548 100644
--- a/src/store-api/src/storage/engine.rs
+++ b/src/store-api/src/storage/engine.rs
@@ -73,6 +73,9 @@ pub trait StorageEngine: Send + Sync + Clone + 'static {
ctx: &EngineContext,
name: &str,
) -> Result<Option<Self::Region>, Self::Error>;
+
+ /// Close the engine.
+ async fn close(&self, ctx: &EngineContext) -> Result<(), Self::Error>;
}
/// Storage engine context.
diff --git a/src/store-api/src/storage/region.rs b/src/store-api/src/storage/region.rs
index de3904bd2e9e..648b15a8769c 100644
--- a/src/store-api/src/storage/region.rs
+++ b/src/store-api/src/storage/region.rs
@@ -110,10 +110,40 @@ pub struct FlushContext {
/// If true, the flush will wait until the flush is done.
/// Default: true
pub wait: bool,
+ /// Flush reason.
+ pub reason: FlushReason,
}
impl Default for FlushContext {
fn default() -> FlushContext {
- FlushContext { wait: true }
+ FlushContext {
+ wait: true,
+ reason: FlushReason::Others,
+ }
+ }
+}
+
+/// Reason of flush operation.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FlushReason {
+ /// Other reasons.
+ Others,
+ /// Memtable is full.
+ MemtableFull,
+ /// Flush manually.
+ Manually,
+ /// Auto flush periodically.
+ Periodically,
+}
+
+impl FlushReason {
+ /// Returns reason as `str`.
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ FlushReason::Others => "others",
+ FlushReason::MemtableFull => "memtable_full",
+ FlushReason::Manually => "manually",
+ FlushReason::Periodically => "periodically",
+ }
}
}
diff --git a/src/table-procedure/src/test_util.rs b/src/table-procedure/src/test_util.rs
index 59615a1a7818..63aee29281eb 100644
--- a/src/table-procedure/src/test_util.rs
+++ b/src/table-procedure/src/test_util.rs
@@ -57,7 +57,8 @@ impl TestEnv {
Arc::new(NoopLogStore::default()),
object_store.clone(),
compaction_scheduler,
- );
+ )
+ .unwrap();
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
storage_engine,
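Stripped of the engine types, the picking rule in flush_regions_by_interval above is a plain staleness check on the last flush time; a minimal self-contained sketch (hypothetical MockRegion and pick_stale_regions names, not the engine's types) looks like this:

struct MockRegion {
    id: u64,
    last_flush_millis: i64,
}

fn pick_stale_regions(
    regions: &[MockRegion],
    now_millis: i64,
    auto_flush_interval_millis: i64,
) -> Vec<u64> {
    // A region qualifies when it has not flushed within the auto flush interval.
    let Some(earliest) = now_millis.checked_sub(auto_flush_interval_millis) else {
        return Vec::new(); // on overflow the picker simply skips this round
    };
    regions
        .iter()
        .filter(|r| r.last_flush_millis < earliest)
        .map(|r| r.id)
        .collect()
}

fn main() {
    let regions = [
        MockRegion { id: 0, last_flush_millis: 10_000 },
        MockRegion { id: 1, last_flush_millis: 9_000_000 },
    ];
    // With a 1h interval at t = 10_000_000 ms, only region 0 is stale.
    assert_eq!(pick_stale_regions(&regions, 10_000_000, 3_600_000), vec![0]);
}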
|
feat
|
Add FlushPicker to flush regions periodically (#1559)
|
715e1a321fc21ae546042dfd65d6b211cce26752
|
2023-05-17 09:26:22
|
Huaijin
|
feat: implement /api/v1/labels for prometheus (#1580)
| false
|
diff --git a/src/common/time/src/util.rs b/src/common/time/src/util.rs
index f56591418275..f2d1feeee15f 100644
--- a/src/common/time/src/util.rs
+++ b/src/common/time/src/util.rs
@@ -22,6 +22,13 @@ pub fn current_time_rfc3339() -> String {
chrono::Utc::now().to_rfc3339()
}
+/// Returns the yesterday time in rfc3339 format.
+pub fn yesterday_rfc3339() -> String {
+ let now = chrono::Utc::now();
+ let day_before = now - chrono::Duration::days(1);
+ day_before.to_rfc3339()
+}
+
/// Port of rust unstable features `int_roundings`.
pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 {
let d = this / rhs;
diff --git a/src/query/src/parser.rs b/src/query/src/parser.rs
index 49971771e80a..05eefa14da87 100644
--- a/src/query/src/parser.rs
+++ b/src/query/src/parser.rs
@@ -37,6 +37,7 @@ use crate::error::{
use crate::metrics::{METRIC_PARSE_PROMQL_ELAPSED, METRIC_PARSE_SQL_ELAPSED};
const DEFAULT_LOOKBACK: u64 = 5 * 60; // 5m
+pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
pub const EXPLAIN_NODE_NAME: &str = "EXPLAIN";
pub const ANALYZE_NODE_NAME: &str = "ANALYZE";
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 101ac1f44b46..dccebf12f9ba 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -281,6 +281,9 @@ pub enum Error {
#[snafu(backtrace)]
source: query::error::Error,
},
+
+ #[snafu(display("{}", reason))]
+ UnexpectedResult { reason: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -348,6 +351,8 @@ impl ErrorExt for Error {
InvalidFlushArgument { .. } => StatusCode::InvalidArguments,
ParsePromQL { source, .. } => source.status_code(),
+
+ UnexpectedResult { .. } => StatusCode::Unexpected,
}
}
diff --git a/src/servers/src/prom.rs b/src/servers/src/prom.rs
index eaee673de64f..14256374ab72 100644
--- a/src/servers/src/prom.rs
+++ b/src/servers/src/prom.rs
@@ -13,7 +13,7 @@
// limitations under the License.
//! prom supply the prometheus HTTP API Server compliance
-use std::collections::{BTreeMap, HashMap};
+use std::collections::{BTreeMap, HashMap, HashSet};
use std::net::SocketAddr;
use std::sync::Arc;
@@ -27,7 +27,7 @@ use common_error::status_code::StatusCode;
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::info;
-use common_time::util::current_time_rfc3339;
+use common_time::util::{current_time_rfc3339, yesterday_rfc3339};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector};
@@ -37,11 +37,12 @@ use promql_parser::parser::{
AggregateExpr, BinaryExpr, Call, Expr as PromqlExpr, MatrixSelector, ParenExpr, SubqueryExpr,
UnaryExpr, ValueType, VectorSelector,
};
-use query::parser::PromQuery;
+use query::parser::{PromQuery, DEFAULT_LOOKBACK_STRING};
use schemars::JsonSchema;
+use serde::de::{self, MapAccess, Visitor};
use serde::{Deserialize, Serialize};
use session::context::{QueryContext, QueryContextRef};
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{ensure, Location, OptionExt, ResultExt};
use tokio::sync::oneshot::Sender;
use tokio::sync::{oneshot, Mutex};
use tower::ServiceBuilder;
@@ -51,10 +52,11 @@ use tower_http::trace::TraceLayer;
use crate::auth::UserProviderRef;
use crate::error::{
- AlreadyStartedSnafu, CollectRecordbatchSnafu, InternalSnafu, NotSupportedSnafu, Result,
+ AlreadyStartedSnafu, CollectRecordbatchSnafu, Error, InternalSnafu, NotSupportedSnafu, Result,
StartHttpSnafu,
};
use crate::http::authorize::HttpAuth;
+use crate::prometheus::{FIELD_COLUMN_NAME, TIMESTAMP_COLUMN_NAME};
use crate::server::Server;
pub const PROM_API_VERSION: &str = "v1";
@@ -88,11 +90,12 @@ impl PromServer {
}
pub fn make_app(&self) -> Router {
- // TODO(ruihang): implement format_query, series, labels, values, query_examplars and targets methods
+ // TODO(ruihang): implement format_query, series, values, query_examplars and targets methods
let router = Router::new()
.route("/query", routing::post(instant_query).get(instant_query))
.route("/query_range", routing::post(range_query).get(range_query))
+ .route("/labels", routing::post(labels_query).get(labels_query))
.with_state(self.query_handler.clone());
Router::new()
@@ -175,10 +178,23 @@ pub struct PromData {
pub result: Vec<PromSeries>,
}
+#[derive(Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
+#[serde(untagged)]
+pub enum PromResponse {
+ PromData(PromData),
+ Labels(Vec<String>),
+}
+
+impl Default for PromResponse {
+ fn default() -> Self {
+ PromResponse::PromData(Default::default())
+ }
+}
+
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)]
pub struct PromJsonResponse {
pub status: String,
- pub data: PromData,
+ pub data: PromResponse,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
@@ -196,14 +212,14 @@ impl PromJsonResponse {
{
Json(PromJsonResponse {
status: "error".to_string(),
- data: PromData::default(),
+ data: PromResponse::default(),
error: Some(reason.into()),
error_type: Some(error_type.into()),
warnings: None,
})
}
- pub fn success(data: PromData) -> Json<Self> {
+ pub fn success(data: PromResponse) -> Json<Self> {
Json(PromJsonResponse {
status: "success".to_string(),
data,
@@ -236,10 +252,9 @@ impl PromJsonResponse {
result_type,
)?)
}
- Output::AffectedRows(_) => Self::error(
- "unexpected result",
- "expected data result, but got affected rows",
- ),
+ Output::AffectedRows(_) => {
+ Self::error("Unexpected", "expected data result, but got affected rows")
+ }
};
json
@@ -254,10 +269,10 @@ impl PromJsonResponse {
if err.status_code() == StatusCode::TableNotFound
|| err.status_code() == StatusCode::TableColumnNotFound
{
- Self::success(PromData {
+ Self::success(PromResponse::PromData(PromData {
result_type: result_type_string,
..Default::default()
- })
+ }))
} else {
Self::error(err.status_code().to_string(), err.to_string())
}
@@ -270,7 +285,7 @@ impl PromJsonResponse {
batches: RecordBatches,
metric_name: String,
result_type: Option<ValueType>,
- ) -> Result<PromData> {
+ ) -> Result<PromResponse> {
// infer semantic type of each column from schema.
// TODO(ruihang): wish there is a better way to do this.
let mut timestamp_column_index = None;
@@ -383,10 +398,10 @@ impl PromJsonResponse {
.collect::<Result<Vec<_>>>()?;
let result_type_string = result_type.map(|t| t.to_string()).unwrap_or_default();
- let data = PromData {
+ let data = PromResponse::PromData(PromData {
result_type: result_type_string,
result,
- };
+ });
Ok(data)
}
@@ -463,6 +478,191 @@ pub async fn range_query(
PromJsonResponse::from_query_result(result, metric_name, Some(ValueType::Matrix)).await
}
+#[derive(Debug, Default, Serialize, JsonSchema)]
+struct Matches(Vec<String>);
+
+#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
+pub struct LabelsQuery {
+ start: Option<String>,
+ end: Option<String>,
+ #[serde(flatten)]
+ matches: Matches,
+ db: Option<String>,
+}
+
+// Custom Deserialize method to support parsing repeated match[]
+impl<'de> Deserialize<'de> for Matches {
+ fn deserialize<D>(deserializer: D) -> std::result::Result<Matches, D::Error>
+ where
+ D: de::Deserializer<'de>,
+ {
+ struct MatchesVisitor;
+
+ impl<'d> Visitor<'d> for MatchesVisitor {
+ type Value = Vec<String>;
+
+ fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+ formatter.write_str("a string")
+ }
+
+ fn visit_map<M>(self, mut access: M) -> std::result::Result<Self::Value, M::Error>
+ where
+ M: MapAccess<'d>,
+ {
+ let mut matches = Vec::new();
+ while let Some((key, value)) = access.next_entry::<String, String>()? {
+ if key == "match[]" {
+ matches.push(value);
+ }
+ }
+ Ok(matches)
+ }
+ }
+ Ok(Matches(deserializer.deserialize_map(MatchesVisitor)?))
+ }
+}
+
+#[axum_macros::debug_handler]
+pub async fn labels_query(
+ State(handler): State<PromHandlerRef>,
+ Query(params): Query<LabelsQuery>,
+ Form(form_params): Form<LabelsQuery>,
+) -> Json<PromJsonResponse> {
+ let mut queries: Vec<String> = params.matches.0;
+ if queries.is_empty() {
+ queries = form_params.matches.0;
+ }
+ if queries.is_empty() {
+ return PromJsonResponse::error("Unsupported", "match[] parameter is required");
+ }
+
+ let start = params
+ .start
+ .or(form_params.start)
+ .unwrap_or_else(yesterday_rfc3339);
+ let end = params
+ .end
+ .or(form_params.end)
+ .unwrap_or_else(current_time_rfc3339);
+
+ let db = &params.db.unwrap_or(DEFAULT_SCHEMA_NAME.to_string());
+ let (catalog, schema) = super::parse_catalog_and_schema_from_client_database_name(db);
+ let query_ctx = Arc::new(QueryContext::with(catalog, schema));
+
+ let mut labels: HashSet<String> = HashSet::new();
+ labels.insert(METRIC_NAME.to_string());
+
+ for query in queries {
+ let prom_query = PromQuery {
+ query,
+ start: start.clone(),
+ end: end.clone(),
+ // TODO: find a better value for step
+ step: DEFAULT_LOOKBACK_STRING.to_string(),
+ };
+
+ let result = handler.do_query(&prom_query, query_ctx.clone()).await;
+
+ let response = retrieve_labels_name_from_query_result(result, &mut labels).await;
+
+ if let Err(err) = response {
+ // Prometheus won't report error if querying nonexist label and metric
+ if err.status_code() != StatusCode::TableNotFound
+ && err.status_code() != StatusCode::TableColumnNotFound
+ {
+ return PromJsonResponse::error(err.status_code().to_string(), err.to_string());
+ }
+ }
+ }
+
+ labels.remove(TIMESTAMP_COLUMN_NAME);
+ labels.remove(FIELD_COLUMN_NAME);
+
+ let mut sorted_labels: Vec<String> = labels.into_iter().collect();
+ sorted_labels.sort();
+ PromJsonResponse::success(PromResponse::Labels(sorted_labels))
+}
+
+/// Retrieve labels name from query result
+async fn retrieve_labels_name_from_query_result(
+ result: Result<Output>,
+ labels: &mut HashSet<String>,
+) -> Result<()> {
+ match result? {
+ Output::RecordBatches(batches) => {
+ record_batches_to_labels_name(batches, labels)?;
+ Ok(())
+ }
+ Output::Stream(stream) => {
+ let batches = RecordBatches::try_collect(stream)
+ .await
+ .context(CollectRecordbatchSnafu)?;
+ record_batches_to_labels_name(batches, labels)?;
+ Ok(())
+ }
+ Output::AffectedRows(_) => Err(Error::UnexpectedResult {
+ reason: "expected data result, but got affected rows".to_string(),
+ location: Location::default(),
+ }),
+ }
+}
+
+/// Retrieve labels name from record batches
+fn record_batches_to_labels_name(
+ batches: RecordBatches,
+ labels: &mut HashSet<String>,
+) -> Result<()> {
+ let mut column_indices = Vec::new();
+ let mut field_column_indices = Vec::new();
+ for (i, column) in batches.schema().column_schemas().iter().enumerate() {
+ if let ConcreteDataType::Float64(_) = column.data_type {
+ field_column_indices.push(i);
+ }
+ column_indices.push(i);
+ }
+
+ if field_column_indices.is_empty() {
+ return Err(Error::Internal {
+ err_msg: "no value column found".to_string(),
+ });
+ }
+
+ for batch in batches.iter() {
+ let names = column_indices
+ .iter()
+ .map(|c| batches.schema().column_name_by_index(*c).to_string())
+ .collect::<Vec<_>>();
+
+ let field_columns = field_column_indices
+ .iter()
+ .map(|i| {
+ batch
+ .column(*i)
+ .as_any()
+ .downcast_ref::<Float64Vector>()
+ .unwrap()
+ })
+ .collect::<Vec<_>>();
+
+ for row_index in 0..batch.num_rows() {
+ // if all field columns are null, skip this row
+ if field_columns
+ .iter()
+ .all(|c| c.get_data(row_index).is_none())
+ {
+ continue;
+ }
+
+ // if a field is not null, record the tag name and return
+ names.iter().for_each(|name| {
+ labels.insert(name.to_string());
+ });
+ return Ok(());
+ }
+ }
+ Ok(())
+}
+
pub(crate) fn retrieve_metric_name_and_result_type(
promql: &str,
) -> Option<(String, Option<ValueType>)> {
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 1725d1149c1c..848a714ec315 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -31,8 +31,8 @@ use snap::raw::{Decoder, Encoder};
use crate::error::{self, Result};
-const TIMESTAMP_COLUMN_NAME: &str = "greptime_timestamp";
-const FIELD_COLUMN_NAME: &str = "greptime_value";
+pub const TIMESTAMP_COLUMN_NAME: &str = "greptime_timestamp";
+pub const FIELD_COLUMN_NAME: &str = "greptime_value";
pub const METRIC_NAME_LABEL: &str = "__name__";
/// Metrics for push gateway protocol
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 9b620b241f2d..60c79c9c9828 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -22,7 +22,7 @@ use api::v1::{
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::consts::{MIN_USER_TABLE_ID, MITO_ENGINE};
use common_query::Output;
-use servers::prom::{PromData, PromJsonResponse, PromSeries};
+use servers::prom::{PromData, PromJsonResponse, PromResponse, PromSeries};
use servers::server::Server;
use tests_integration::test_util::{setup_grpc_server, StorageType};
@@ -366,7 +366,7 @@ pub async fn test_prom_gateway_query(store_type: StorageType) {
let instant_query_result = serde_json::from_slice::<PromJsonResponse>(&json_bytes).unwrap();
let expected = PromJsonResponse {
status: "success".to_string(),
- data: PromData {
+ data: PromResponse::PromData(PromData {
result_type: "vector".to_string(),
result: vec![
PromSeries {
@@ -390,7 +390,7 @@ pub async fn test_prom_gateway_query(store_type: StorageType) {
..Default::default()
},
],
- },
+ }),
error: None,
error_type: None,
warnings: None,
@@ -417,7 +417,7 @@ pub async fn test_prom_gateway_query(store_type: StorageType) {
let range_query_result = serde_json::from_slice::<PromJsonResponse>(&json_bytes).unwrap();
let expected = PromJsonResponse {
status: "success".to_string(),
- data: PromData {
+ data: PromResponse::PromData(PromData {
result_type: "matrix".to_string(),
result: vec![
PromSeries {
@@ -441,7 +441,7 @@ pub async fn test_prom_gateway_query(store_type: StorageType) {
..Default::default()
},
],
- },
+ }),
error: None,
error_type: None,
warnings: None,
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 8ee4093ac1cb..f7b76544d456 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -313,6 +313,28 @@ pub async fn test_prom_http_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
+ // labels
+ let res = client.get("/api/v1/labels?match[]=up").send().await;
+ assert_eq!(res.status(), StatusCode::OK);
+ let res = client
+ .post("/api/v1/labels?match[]=up")
+ .header("Content-Type", "application/x-www-form-urlencoded")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+ // labels query with multiple match[] params
+ let res = client
+ .get("/api/v1/labels?match[]=up&match[]=down")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+ let res = client
+ .post("/api/v1/labels?match[]=up&match[]=down")
+ .header("Content-Type", "application/x-www-form-urlencoded")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+
guard.remove_all().await;
}
|
feat
|
implement /api/v1/labels for prometheus (#1580)
|
2dd86b686fb429ba8f43a221b3aac40c59c6aebc
|
2023-06-19 17:25:59
|
LFC
|
feat: extend region leases in Metasrv (#1784)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 71e481b5aee9..36ace61d4127 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1801,6 +1801,7 @@ name = "common-meta"
version = "0.4.0"
dependencies = [
"api",
+ "async-trait",
"chrono",
"common-catalog",
"common-error",
@@ -4096,7 +4097,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=aee86f4a68c59873961c9b99ee7ed6a4341bf773#aee86f4a68c59873961c9b99ee7ed6a4341bf773"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=5d5eb65bb985ff47b3a417fb2505e315e2f5c319#5d5eb65bb985ff47b3a417fb2505e315e2f5c319"
dependencies = [
"prost",
"serde",
@@ -5185,6 +5186,7 @@ dependencies = [
"serde_json",
"servers",
"snafu",
+ "store-api",
"table",
"tokio",
"tokio-stream",
diff --git a/Cargo.toml b/Cargo.toml
index 3a3ccf501ddf..a183bbddfef4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -72,7 +72,7 @@ datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "aee86f4a68c59873961c9b99ee7ed6a4341bf773" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "5d5eb65bb985ff47b3a417fb2505e315e2f5c319" }
itertools = "0.10"
parquet = "40.0"
paste = "1.0"
diff --git a/src/catalog/src/helper.rs b/src/catalog/src/helper.rs
index 83a0a84a1705..8520bc3953bd 100644
--- a/src/catalog/src/helper.rs
+++ b/src/catalog/src/helper.rs
@@ -91,7 +91,7 @@ pub fn build_table_regional_prefix(
}
/// Table global info has only one key across all datanodes so it does not have `node_id` field.
-#[derive(Clone)]
+#[derive(Clone, Hash, Eq, PartialEq)]
pub struct TableGlobalKey {
pub catalog_name: String,
pub schema_name: String,
@@ -124,6 +124,14 @@ impl TableGlobalKey {
table_name: captures[3].to_string(),
})
}
+
+ pub fn to_raw_key(&self) -> Vec<u8> {
+ self.to_string().into_bytes()
+ }
+
+ pub fn try_from_raw_key(key: &[u8]) -> Result<Self, Error> {
+ Self::parse(String::from_utf8_lossy(key))
+ }
}
/// Table global info contains necessary info for a datanode to create table regions, including
@@ -141,6 +149,10 @@ impl TableGlobalValue {
pub fn table_id(&self) -> TableId {
self.table_info.ident.table_id
}
+
+ pub fn engine(&self) -> &str {
+ &self.table_info.meta.engine
+ }
}
/// Table regional info that varies between datanode, so it contains a `node_id` field.
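For context, a self-contained sketch of the raw-key round trip that the new `to_raw_key` / `try_from_raw_key` helpers above enable; the struct and its string layout here are simplified assumptions for illustration, not the real catalog key format.

use std::fmt::{self, Display, Formatter};

// Simplified stand-in for a catalog key; the real key string format is not reproduced here.
#[derive(Debug, PartialEq)]
struct DemoKey {
    catalog: String,
    schema: String,
    table: String,
}

impl Display for DemoKey {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{}-{}-{}", self.catalog, self.schema, self.table)
    }
}

impl DemoKey {
    // Serialize the key to bytes for the KV store, mirroring `to_raw_key` above.
    fn to_raw_key(&self) -> Vec<u8> {
        self.to_string().into_bytes()
    }

    // Parse the bytes back, mirroring `try_from_raw_key` above.
    fn try_from_raw_key(raw: &[u8]) -> Option<Self> {
        let s = String::from_utf8_lossy(raw);
        let mut parts = s.split('-');
        Some(Self {
            catalog: parts.next()?.to_string(),
            schema: parts.next()?.to_string(),
            table: parts.next()?.to_string(),
        })
    }
}

fn main() {
    let key = DemoKey {
        catalog: "greptime".to_string(),
        schema: "public".to_string(),
        table: "metrics".to_string(),
    };
    let raw = key.to_raw_key();
    assert_eq!(DemoKey::try_from_raw_key(&raw).unwrap(), key);
}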
diff --git a/src/catalog/src/remote.rs b/src/catalog/src/remote.rs
index 617ec32096ba..da32e53bf173 100644
--- a/src/catalog/src/remote.rs
+++ b/src/catalog/src/remote.rs
@@ -29,9 +29,6 @@ mod manager;
#[cfg(feature = "testing")]
pub mod mock;
-
-// FIXME(LFC): Used in next PR.
-#[allow(dead_code)]
pub mod region_alive_keeper;
#[derive(Debug, Clone)]
diff --git a/src/catalog/src/remote/region_alive_keeper.rs b/src/catalog/src/remote/region_alive_keeper.rs
index 51192c9d889c..327e846b3b7a 100644
--- a/src/catalog/src/remote/region_alive_keeper.rs
+++ b/src/catalog/src/remote/region_alive_keeper.rs
@@ -16,10 +16,15 @@ use std::collections::HashMap;
use std::future::Future;
use std::sync::Arc;
+use async_trait::async_trait;
+use common_meta::error::InvalidProtoMsgSnafu;
+use common_meta::heartbeat::handler::{
+ HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
+};
use common_meta::ident::TableIdent;
use common_meta::RegionIdent;
use common_telemetry::{debug, error, info, warn};
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionNumber;
use table::engine::manager::TableEngineManagerRef;
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
@@ -35,6 +40,12 @@ use crate::error::{Result, TableEngineNotFoundSnafu};
pub struct RegionAliveKeepers {
table_engine_manager: TableEngineManagerRef,
keepers: Arc<Mutex<HashMap<TableIdent, Arc<RegionAliveKeeper>>>>,
+
+ /// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
+ /// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
+ /// non-decreasing). The heartbeat request will carry the duration since this epoch, and the
+    /// duration acts like an "invariant point" for the region's keep-alive lease.
+ epoch: Instant,
}
impl RegionAliveKeepers {
@@ -42,6 +53,7 @@ impl RegionAliveKeepers {
Self {
table_engine_manager,
keepers: Arc::new(Mutex::new(HashMap::new())),
+ epoch: Instant::now(),
}
}
@@ -107,6 +119,50 @@ impl RegionAliveKeepers {
keeper.start(heartbeat_interval_millis).await;
}
}
+
+ pub fn epoch(&self) -> Instant {
+ self.epoch
+ }
+}
+
+#[async_trait]
+impl HeartbeatResponseHandler for RegionAliveKeepers {
+ fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
+ !ctx.response.region_leases.is_empty()
+ }
+
+ async fn handle(
+ &self,
+ ctx: &mut HeartbeatResponseHandlerContext,
+ ) -> common_meta::error::Result<HandleControl> {
+ let leases = ctx.response.region_leases.drain(..).collect::<Vec<_>>();
+ for lease in leases {
+ let table_ident: TableIdent = match lease
+ .table_ident
+ .context(InvalidProtoMsgSnafu {
+ err_msg: "'table_ident' is missing in RegionLease",
+ })
+ .and_then(|x| x.try_into())
+ {
+ Ok(x) => x,
+ Err(e) => {
+ error!(e; "");
+ continue;
+ }
+ };
+
+ let Some(keeper) = self.keepers.lock().await.get(&table_ident).cloned() else {
+                // The alive keeper may be missing due to a lagging heartbeat message; just warn and ignore.
+ warn!("Alive keeper for table {table_ident} is not found!");
+ continue;
+ };
+
+ let start_instant = self.epoch + Duration::from_millis(lease.duration_since_epoch);
+ let deadline = start_instant + Duration::from_secs(lease.lease_seconds);
+ keeper.keep_lived(lease.regions, deadline).await;
+ }
+ Ok(HandleControl::Continue)
+ }
}
/// [RegionAliveKeeper] starts a countdown for each region in a table. When deadline is reached,
@@ -309,8 +365,11 @@ impl CountdownTask {
debug!("Reset deadline to region {region} of table {table_ident} to {deadline:?}");
countdown.set(tokio::time::sleep_until(deadline));
}
- // Else the countdown could be not started yet, or during startup protection.
- // Can be safely ignored.
+                        // Else the countdown could be:
+                        // - not started yet;
+                        // - within the startup protection period;
+                        // - affected by a lagging heartbeat message.
+                        // All can be safely ignored.
},
None => {
info!(
@@ -367,6 +426,8 @@ mod test {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
+ use api::v1::meta::{HeartbeatResponse, RegionLease};
+ use common_meta::heartbeat::mailbox::HeartbeatMailbox;
use datatypes::schema::RawSchema;
use table::engine::manager::MemoryTableEngineManager;
use table::engine::{TableEngine, TableReference};
@@ -377,8 +438,7 @@ mod test {
use super::*;
use crate::remote::mock::MockTableEngine;
- #[tokio::test(flavor = "multi_thread")]
- async fn test_region_alive_keepers() {
+ async fn prepare_keepers() -> (TableIdent, RegionAliveKeepers) {
let table_engine = Arc::new(MockTableEngine::default());
let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
let keepers = RegionAliveKeepers::new(table_engine_manager);
@@ -410,13 +470,82 @@ mod test {
table_options: TableOptions::default(),
engine: "MockTableEngine".to_string(),
}));
-
keepers
.register_table(table_ident.clone(), table)
.await
.unwrap();
assert!(keepers.keepers.lock().await.contains_key(&table_ident));
+ (table_ident, keepers)
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_handle_heartbeat_response() {
+ let (table_ident, keepers) = prepare_keepers().await;
+
+ keepers.start(5000).await;
+ let startup_protection_until = Instant::now() + Duration::from_secs(21);
+
+ let duration_since_epoch = (Instant::now() - keepers.epoch).as_millis() as _;
+ let lease_seconds = 100;
+ let response = HeartbeatResponse {
+ region_leases: vec![RegionLease {
+ table_ident: Some(table_ident.clone().into()),
+ regions: vec![1, 3], // Not extending region 2's lease time.
+ duration_since_epoch,
+ lease_seconds,
+ }],
+ ..Default::default()
+ };
+ let keep_alive_until = keepers.epoch
+ + Duration::from_millis(duration_since_epoch)
+ + Duration::from_secs(lease_seconds);
+
+ let (tx, _) = mpsc::channel(8);
+ let mailbox = Arc::new(HeartbeatMailbox::new(tx));
+ let mut ctx = HeartbeatResponseHandlerContext::new(mailbox, response);
+
+ assert!(keepers.handle(&mut ctx).await.unwrap() == HandleControl::Continue);
+
+        // Sleep to wait for the background task spawned in `handle`.
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ async fn test(
+ keeper: &Arc<RegionAliveKeeper>,
+ region_number: RegionNumber,
+ startup_protection_until: Instant,
+ keep_alive_until: Instant,
+ is_kept_live: bool,
+ ) {
+ let handles = keeper.countdown_task_handles.lock().await;
+ let deadline = deadline(&handles.get(®ion_number).unwrap().tx).await;
+ if is_kept_live {
+ assert!(deadline > startup_protection_until && deadline == keep_alive_until);
+ } else {
+ assert!(deadline <= startup_protection_until);
+ }
+ }
+
+ let keeper = &keepers
+ .keepers
+ .lock()
+ .await
+ .get(&table_ident)
+ .cloned()
+ .unwrap();
+
+        // Test that regions 1 and 3 are kept alive: their deadlines are updated to the desired instant.
+ test(keeper, 1, startup_protection_until, keep_alive_until, true).await;
+ test(keeper, 3, startup_protection_until, keep_alive_until, true).await;
+
+        // Test that region 2 is not kept alive: its deadline is not updated, still within the startup protection period.
+ test(keeper, 2, startup_protection_until, keep_alive_until, false).await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_region_alive_keepers() {
+ let (table_ident, keepers) = prepare_keepers().await;
+
keepers
.register_region(&RegionIdent {
cluster_id: 1,
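A minimal sketch (not the actual GreptimeDB code) of the lease-deadline arithmetic the alive keeper above relies on: the Datanode reports the milliseconds elapsed since its own epoch in each heartbeat, Metasrv echoes that value back in the `RegionLease`, and the keeper turns it into an `Instant` deadline, so the lease stays anchored to the Datanode's own monotonic clock.

use std::time::{Duration, Instant};

// `epoch` is the Instant captured when the keepers were created;
// `duration_since_epoch_ms` is echoed back by Metasrv from the heartbeat request.
fn lease_deadline(epoch: Instant, duration_since_epoch_ms: u64, lease_seconds: u64) -> Instant {
    let start = epoch + Duration::from_millis(duration_since_epoch_ms);
    start + Duration::from_secs(lease_seconds)
}

fn main() {
    let epoch = Instant::now();
    // Pretend the heartbeat was sent 1500 ms after the keeper was created,
    // and Metasrv granted a 20-second lease.
    let deadline = lease_deadline(epoch, 1500, 20);
    assert!(deadline > Instant::now());
    println!("region lease deadline: {deadline:?}");
}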
diff --git a/src/common/meta/Cargo.toml b/src/common/meta/Cargo.toml
index e6e68b66ecf7..e91a9bbbad11 100644
--- a/src/common/meta/Cargo.toml
+++ b/src/common/meta/Cargo.toml
@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
api = { path = "../../api" }
+async-trait.workspace = true
common-catalog = { path = "../catalog" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
diff --git a/src/common/meta/src/heartbeat/handler.rs b/src/common/meta/src/heartbeat/handler.rs
index 567a7921345a..9b24955af3ea 100644
--- a/src/common/meta/src/heartbeat/handler.rs
+++ b/src/common/meta/src/heartbeat/handler.rs
@@ -15,6 +15,7 @@
use std::sync::Arc;
use api::v1::meta::HeartbeatResponse;
+use async_trait::async_trait;
use common_telemetry::error;
use crate::error::Result;
@@ -57,14 +58,16 @@ impl HeartbeatResponseHandlerContext {
/// [`HeartbeatResponseHandler::is_acceptable`] returns true if handler can handle incoming [`HeartbeatResponseHandlerContext`].
///
/// [`HeartbeatResponseHandler::handle`] handles all or part of incoming [`HeartbeatResponseHandlerContext`].
+#[async_trait]
pub trait HeartbeatResponseHandler: Send + Sync {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool;
- fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<HandleControl>;
+ async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<HandleControl>;
}
+#[async_trait]
pub trait HeartbeatResponseHandlerExecutor: Send + Sync {
- fn handle(&self, ctx: HeartbeatResponseHandlerContext) -> Result<()>;
+ async fn handle(&self, ctx: HeartbeatResponseHandlerContext) -> Result<()>;
}
pub struct HandlerGroupExecutor {
@@ -77,14 +80,15 @@ impl HandlerGroupExecutor {
}
}
+#[async_trait]
impl HeartbeatResponseHandlerExecutor for HandlerGroupExecutor {
- fn handle(&self, mut ctx: HeartbeatResponseHandlerContext) -> Result<()> {
+ async fn handle(&self, mut ctx: HeartbeatResponseHandlerContext) -> Result<()> {
for handler in &self.handlers {
if !handler.is_acceptable(&ctx) {
continue;
}
- match handler.handle(&mut ctx) {
+ match handler.handle(&mut ctx).await {
Ok(HandleControl::Done) => break,
Ok(HandleControl::Continue) => {}
Err(e) => {
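For reference, a simplified sketch of the now-async handler chain introduced above; the types below are illustrative stand-ins, not the real `common_meta` definitions.

use async_trait::async_trait;

#[derive(Debug, PartialEq)]
enum HandleControl {
    Continue,
    Done,
}

struct Ctx {
    region_leases: Vec<u32>,
}

#[async_trait]
trait Handler: Send + Sync {
    fn is_acceptable(&self, ctx: &Ctx) -> bool;
    async fn handle(&self, ctx: &mut Ctx) -> HandleControl;
}

struct LeaseHandler;

#[async_trait]
impl Handler for LeaseHandler {
    fn is_acceptable(&self, ctx: &Ctx) -> bool {
        !ctx.region_leases.is_empty()
    }

    async fn handle(&self, ctx: &mut Ctx) -> HandleControl {
        // Drain the leases so later handlers do not see them again.
        let leases = ctx.region_leases.drain(..).collect::<Vec<_>>();
        println!("extending leases for regions {leases:?}");
        HandleControl::Continue
    }
}

// The executor walks the handlers in order, skipping unacceptable ones and
// stopping early when a handler returns Done.
async fn run(handlers: &[Box<dyn Handler>], mut ctx: Ctx) {
    for h in handlers {
        if !h.is_acceptable(&ctx) {
            continue;
        }
        if h.handle(&mut ctx).await == HandleControl::Done {
            break;
        }
    }
}

#[tokio::main]
async fn main() {
    let handlers: Vec<Box<dyn Handler>> = vec![Box::new(LeaseHandler)];
    run(&handlers, Ctx { region_leases: vec![1, 2] }).await;
}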
diff --git a/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs b/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs
index bc7044011517..fb9d1702fd24 100644
--- a/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs
+++ b/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use async_trait::async_trait;
+
use crate::error::Result;
use crate::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
@@ -21,12 +23,13 @@ use crate::heartbeat::utils::mailbox_message_to_incoming_message;
#[derive(Default)]
pub struct ParseMailboxMessageHandler;
+#[async_trait]
impl HeartbeatResponseHandler for ParseMailboxMessageHandler {
fn is_acceptable(&self, _ctx: &HeartbeatResponseHandlerContext) -> bool {
true
}
- fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<HandleControl> {
+ async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result<HandleControl> {
if let Some(message) = &ctx.response.mailbox_message {
if message.payload.is_some() {
// mailbox_message_to_incoming_message will raise an error if payload is none
diff --git a/src/common/meta/src/ident.rs b/src/common/meta/src/ident.rs
index cfc08fa7bc83..522a242e2274 100644
--- a/src/common/meta/src/ident.rs
+++ b/src/common/meta/src/ident.rs
@@ -14,7 +14,7 @@
use std::fmt::{Display, Formatter};
-use api::v1::meta::TableIdent as RawTableIdent;
+use api::v1::meta::{TableIdent as RawTableIdent, TableName};
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
@@ -55,3 +55,17 @@ impl TryFrom<RawTableIdent> for TableIdent {
})
}
}
+
+impl From<TableIdent> for RawTableIdent {
+ fn from(table_ident: TableIdent) -> Self {
+ Self {
+ table_id: table_ident.table_id,
+ engine: table_ident.engine,
+ table_name: Some(TableName {
+ catalog_name: table_ident.catalog,
+ schema_name: table_ident.schema,
+ table_name: table_ident.table,
+ }),
+ }
+ }
+}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index c240af2c55b7..87275b66136c 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::{HeartbeatRequest, NodeStat, Peer};
+use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::{datanode_stat, CatalogManagerRef};
use common_meta::heartbeat::handler::{
HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
@@ -42,6 +43,7 @@ pub struct HeartbeatTask {
catalog_manager: CatalogManagerRef,
interval: u64,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl Drop for HeartbeatTask {
@@ -59,6 +61,7 @@ impl HeartbeatTask {
meta_client: Arc<MetaClient>,
catalog_manager: CatalogManagerRef,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Self {
Self {
node_id,
@@ -69,6 +72,7 @@ impl HeartbeatTask {
catalog_manager,
interval: 5_000, // default interval is set to 5 secs
resp_handler_executor,
+ region_alive_keepers,
}
}
@@ -94,7 +98,7 @@ impl HeartbeatTask {
}
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), res);
- if let Err(e) = Self::handle_response(ctx, handler_executor.clone()) {
+ if let Err(e) = Self::handle_response(ctx, handler_executor.clone()).await {
error!(e; "Error while handling heartbeat response");
}
if !running.load(Ordering::Acquire) {
@@ -106,13 +110,14 @@ impl HeartbeatTask {
Ok(tx)
}
- fn handle_response(
+ async fn handle_response(
ctx: HeartbeatResponseHandlerContext,
handler_executor: HeartbeatResponseHandlerExecutorRef,
) -> Result<()> {
trace!("heartbeat response: {:?}", ctx.response);
handler_executor
.handle(ctx)
+ .await
.context(error::HandleHeartbeatResponseSnafu)
}
@@ -131,8 +136,7 @@ impl HeartbeatTask {
let addr = resolve_addr(&self.server_addr, &self.server_hostname);
info!("Starting heartbeat to Metasrv with interval {interval}. My node id is {node_id}, address is {addr}.");
- // TODO(LFC): Continued in next PR.
- // self.region_alive_keepers.start(interval).await;
+ self.region_alive_keepers.start(interval).await;
let meta_client = self.meta_client.clone();
let catalog_manager_clone = self.catalog_manager.clone();
@@ -150,6 +154,7 @@ impl HeartbeatTask {
)
.await?;
+ let epoch = self.region_alive_keepers.epoch();
common_runtime::spawn_bg(async move {
let sleep = tokio::time::sleep(Duration::from_millis(0));
tokio::pin!(sleep);
@@ -195,6 +200,7 @@ impl HeartbeatTask {
..Default::default()
}),
region_stats,
+ duration_since_epoch: (Instant::now() - epoch).as_millis() as u64,
..Default::default()
};
sleep.as_mut().reset(Instant::now() + Duration::from_millis(interval));
diff --git a/src/datanode/src/heartbeat/handler/close_region.rs b/src/datanode/src/heartbeat/handler/close_region.rs
index 638c1aa014e8..1dc0157fe723 100644
--- a/src/datanode/src/heartbeat/handler/close_region.rs
+++ b/src/datanode/src/heartbeat/handler/close_region.rs
@@ -14,6 +14,7 @@
use std::sync::Arc;
+use async_trait::async_trait;
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::{CatalogManagerRef, DeregisterTableRequest};
use common_catalog::format_full_table_name;
@@ -38,6 +39,7 @@ pub struct CloseRegionHandler {
region_alive_keepers: Arc<RegionAliveKeepers>,
}
+#[async_trait]
impl HeartbeatResponseHandler for CloseRegionHandler {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
matches!(
@@ -46,7 +48,7 @@ impl HeartbeatResponseHandler for CloseRegionHandler {
)
}
- fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
+ async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
let Some((meta, Instruction::CloseRegion(region_ident))) = ctx.incoming_message.take() else {
unreachable!("CloseRegionHandler: should be guarded by 'is_acceptable'");
};
diff --git a/src/datanode/src/heartbeat/handler/open_region.rs b/src/datanode/src/heartbeat/handler/open_region.rs
index 71b4863f6d64..e56116a48ff5 100644
--- a/src/datanode/src/heartbeat/handler/open_region.rs
+++ b/src/datanode/src/heartbeat/handler/open_region.rs
@@ -14,6 +14,7 @@
use std::sync::Arc;
+use async_trait::async_trait;
use catalog::error::Error as CatalogError;
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::{CatalogManagerRef, RegisterTableRequest};
@@ -39,6 +40,7 @@ pub struct OpenRegionHandler {
region_alive_keepers: Arc<RegionAliveKeepers>,
}
+#[async_trait]
impl HeartbeatResponseHandler for OpenRegionHandler {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
matches!(
@@ -47,7 +49,7 @@ impl HeartbeatResponseHandler for OpenRegionHandler {
)
}
- fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
+ async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
let Some((meta, Instruction::OpenRegion(region_ident))) = ctx.incoming_message.take() else {
unreachable!("OpenRegionHandler: should be guarded by 'is_acceptable'");
};
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 72336f95b7a1..5e5a63006fbf 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -215,8 +215,9 @@ impl Instance {
Arc::new(CloseRegionHandler::new(
catalog_manager.clone(),
engine_manager.clone(),
- region_alive_keepers,
+ region_alive_keepers.clone(),
)),
+ region_alive_keepers.clone(),
]);
let heartbeat_task = Some(HeartbeatTask::new(
@@ -226,6 +227,7 @@ impl Instance {
meta_client,
catalog_manager.clone(),
Arc::new(handlers_executor),
+ region_alive_keepers,
));
(catalog_manager as CatalogManagerRef, None, heartbeat_task)
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 6a278cefa4a2..1796a6c8755b 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -75,7 +75,8 @@ async fn test_close_region_handler() {
executor.clone(),
mailbox.clone(),
close_region_instruction(),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -89,7 +90,8 @@ async fn test_close_region_handler() {
executor.clone(),
mailbox.clone(),
close_region_instruction(),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -112,7 +114,8 @@ async fn test_close_region_handler() {
cluster_id: 1,
datanode_id: 2,
}),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -149,7 +152,7 @@ async fn test_open_region_handler() {
prepare_table(instance.inner()).await;
    // Opens an already opened table
- handle_instruction(executor.clone(), mailbox.clone(), open_region_instruction());
+ handle_instruction(executor.clone(), mailbox.clone(), open_region_instruction()).await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -172,7 +175,8 @@ async fn test_open_region_handler() {
cluster_id: 1,
datanode_id: 2,
}),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -184,7 +188,8 @@ async fn test_open_region_handler() {
executor.clone(),
mailbox.clone(),
close_region_instruction(),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -193,7 +198,7 @@ async fn test_open_region_handler() {
assert_test_table_not_found(instance.inner()).await;
// Opens demo table
- handle_instruction(executor.clone(), mailbox.clone(), open_region_instruction());
+ handle_instruction(executor.clone(), mailbox.clone(), open_region_instruction()).await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -228,7 +233,7 @@ pub fn test_message_meta(id: u64, subject: &str, to: &str, from: &str) -> Messag
}
}
-fn handle_instruction(
+async fn handle_instruction(
executor: Arc<dyn HeartbeatResponseHandlerExecutor>,
mailbox: Arc<HeartbeatMailbox>,
instruction: Instruction,
@@ -237,7 +242,7 @@ fn handle_instruction(
let mut ctx: HeartbeatResponseHandlerContext =
HeartbeatResponseHandlerContext::new(mailbox, response);
ctx.incoming_message = Some((test_message_meta(1, "hi", "foo", "bar"), instruction));
- executor.handle(ctx).unwrap();
+ executor.handle(ctx).await.unwrap();
}
fn close_region_instruction() -> Instruction {
diff --git a/src/frontend/src/heartbeat.rs b/src/frontend/src/heartbeat.rs
index 72644fb25e1f..edf608573bba 100644
--- a/src/frontend/src/heartbeat.rs
+++ b/src/frontend/src/heartbeat.rs
@@ -84,7 +84,7 @@ impl HeartbeatTask {
Ok(Some(resp)) => {
debug!("Receiving heartbeat response: {:?}", resp);
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), resp);
- if let Err(e) = capture_self.handle_response(ctx) {
+ if let Err(e) = capture_self.handle_response(ctx).await {
error!(e; "Error while handling heartbeat response");
}
}
@@ -153,9 +153,10 @@ impl HeartbeatTask {
});
}
- fn handle_response(&self, ctx: HeartbeatResponseHandlerContext) -> Result<()> {
+ async fn handle_response(&self, ctx: HeartbeatResponseHandlerContext) -> Result<()> {
self.resp_handler_executor
.handle(ctx)
+ .await
.context(error::HandleHeartbeatResponseSnafu)
}
diff --git a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
index 111c1ae86f2f..e728a1f93953 100644
--- a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
+++ b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use async_trait::async_trait;
use catalog::helper::TableGlobalKey;
use catalog::remote::KvCacheInvalidatorRef;
use common_meta::error::Result as MetaResult;
@@ -30,6 +31,7 @@ pub struct InvalidateTableCacheHandler {
table_route_cache_invalidator: TableRouteCacheInvalidatorRef,
}
+#[async_trait]
impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
matches!(
@@ -38,7 +40,7 @@ impl HeartbeatResponseHandler for InvalidateTableCacheHandler {
)
}
- fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
+ async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
// TODO(weny): considers introducing a macro
let Some((meta, Instruction::InvalidateTableCache(table_ident))) = ctx.incoming_message.take() else {
unreachable!("InvalidateTableCacheHandler: should be guarded by 'is_acceptable'");
diff --git a/src/frontend/src/heartbeat/handler/tests.rs b/src/frontend/src/heartbeat/handler/tests.rs
index c066ad601b59..e80b52ae77b5 100644
--- a/src/frontend/src/heartbeat/handler/tests.rs
+++ b/src/frontend/src/heartbeat/handler/tests.rs
@@ -90,7 +90,8 @@ async fn test_invalidate_table_cache_handler() {
table_id: 0,
engine: "mito".to_string(),
}),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
@@ -126,7 +127,8 @@ async fn test_invalidate_table_cache_handler() {
table_id: 0,
engine: "mito".to_string(),
}),
- );
+ )
+ .await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
@@ -144,7 +146,7 @@ pub fn test_message_meta(id: u64, subject: &str, to: &str, from: &str) -> Messag
}
}
-fn handle_instruction(
+async fn handle_instruction(
executor: Arc<dyn HeartbeatResponseHandlerExecutor>,
mailbox: Arc<HeartbeatMailbox>,
instruction: Instruction,
@@ -153,5 +155,5 @@ fn handle_instruction(
let mut ctx: HeartbeatResponseHandlerContext =
HeartbeatResponseHandlerContext::new(mailbox, response);
ctx.incoming_message = Some((test_message_meta(1, "hi", "foo", "bar"), instruction));
- executor.handle(ctx).unwrap();
+ executor.handle(ctx).await.unwrap();
}
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 539a3140230c..ca50243ed9d1 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -38,6 +38,7 @@ regex = "1.6"
serde = "1.0"
serde_json = "1.0"
snafu.workspace = true
+store-api = { path = "../store-api" }
table = { path = "../table" }
tokio.workspace = true
tokio-stream = { version = "0.1", features = ["net"] }
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 3dd295f4ecdd..3ed8819d6e11 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -354,6 +354,12 @@ pub enum Error {
source: common_meta::error::Error,
},
+ #[snafu(display("Failed to convert proto data, source: {}", source))]
+ ConvertProtoData {
+ location: Location,
+ source: common_meta::error::Error,
+ },
+
// this error is used for custom error mapping
// please do not delete it
#[snafu(display("Other error, source: {}", source))]
@@ -442,7 +448,9 @@ impl ErrorExt for Error {
Error::RegionFailoverCandidatesNotFound { .. } => StatusCode::RuntimeResourcesExhausted,
Error::RegisterProcedureLoader { source, .. } => source.status_code(),
- Error::TableRouteConversion { source, .. } => source.status_code(),
+ Error::TableRouteConversion { source, .. } | Error::ConvertProtoData { source, .. } => {
+ source.status_code()
+ }
Error::Other { source, .. } => source.status_code(),
}
}
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index 658f45d3a5be..84acb376c463 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -19,8 +19,8 @@ use std::time::Duration;
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{
- HeartbeatRequest, HeartbeatResponse, MailboxMessage, RequestHeader, ResponseHeader, Role,
- PROTOCOL_VERSION,
+ HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, RequestHeader,
+ ResponseHeader, Role, PROTOCOL_VERSION,
};
pub use check_leader_handler::CheckLeaderHandler;
pub use collect_stats_handler::CollectStatsHandler;
@@ -54,6 +54,7 @@ pub mod mailbox_handler;
pub mod node_stat;
mod on_leader_start;
mod persist_stats_handler;
+pub(crate) mod region_lease_handler;
mod response_header_handler;
#[async_trait::async_trait]
@@ -73,6 +74,7 @@ pub struct HeartbeatAccumulator {
pub header: Option<ResponseHeader>,
pub instructions: Vec<Instruction>,
pub stat: Option<Stat>,
+ pub region_leases: Vec<RegionLease>,
}
impl HeartbeatAccumulator {
@@ -233,7 +235,7 @@ impl HeartbeatHandlerGroup {
let header = std::mem::take(&mut acc.header);
let res = HeartbeatResponse {
header,
- mailbox_message: acc.into_mailbox_message(),
+ region_leases: acc.region_leases,
..Default::default()
};
Ok(res)
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index 953efaf6af96..39bd454cfba5 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -36,6 +36,7 @@ pub(crate) struct DatanodeHeartbeat {
pub struct RegionFailureHandler {
failure_detect_runner: FailureDetectRunner,
+ region_failover_manager: Arc<RegionFailoverManager>,
}
impl RegionFailureHandler {
@@ -45,13 +46,19 @@ impl RegionFailureHandler {
) -> Result<Self> {
region_failover_manager.try_start()?;
- let mut failure_detect_runner = FailureDetectRunner::new(election, region_failover_manager);
+ let mut failure_detect_runner =
+ FailureDetectRunner::new(election, region_failover_manager.clone());
failure_detect_runner.start().await;
Ok(Self {
failure_detect_runner,
+ region_failover_manager,
})
}
+
+ pub(crate) fn region_failover_manager(&self) -> &Arc<RegionFailoverManager> {
+ &self.region_failover_manager
+ }
}
#[async_trait]
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
new file mode 100644
index 000000000000..6eeb0ef0bb2e
--- /dev/null
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -0,0 +1,226 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use api::v1::meta::{HeartbeatRequest, RegionLease, Role};
+use async_trait::async_trait;
+use catalog::helper::TableGlobalKey;
+use common_meta::ident::TableIdent;
+use common_meta::ClusterId;
+use store_api::storage::RegionNumber;
+
+use crate::error::Result;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
+use crate::metasrv::Context;
+use crate::procedure::region_failover::{RegionFailoverKey, RegionFailoverManager};
+use crate::service::store::kv::KvStoreRef;
+use crate::table_routes;
+
+/// The lease seconds of a region. It's set to two default heartbeat intervals (5 seconds × 2) plus
+/// two round-trip times (2 seconds × 2 × 2), plus some extra buffer (2 seconds).
+// TODO(LFC): Make region lease seconds calculated from Datanode heartbeat configuration.
+pub(crate) const REGION_LEASE_SECONDS: u64 = 20;
+
+pub(crate) struct RegionLeaseHandler {
+ kv_store: KvStoreRef,
+ region_failover_manager: Option<Arc<RegionFailoverManager>>,
+}
+
+impl RegionLeaseHandler {
+ pub(crate) fn new(
+ kv_store: KvStoreRef,
+ region_failover_manager: Option<Arc<RegionFailoverManager>>,
+ ) -> Self {
+ Self {
+ kv_store,
+ region_failover_manager,
+ }
+ }
+
+ /// Filter out the regions that are currently in failover.
+ /// It's meaningless to extend the lease of a region if it is in failover.
+ fn filter_failover_regions(
+ &self,
+ cluster_id: ClusterId,
+ table_ident: &TableIdent,
+ regions: Vec<RegionNumber>,
+ ) -> Vec<RegionNumber> {
+ if let Some(region_failover_manager) = &self.region_failover_manager {
+ let mut region_failover_key = RegionFailoverKey {
+ cluster_id,
+ table_ident: table_ident.clone(),
+ region_number: 0,
+ };
+
+ regions
+ .into_iter()
+ .filter(|region| {
+ region_failover_key.region_number = *region;
+ !region_failover_manager.is_region_failover_running(®ion_failover_key)
+ })
+ .collect()
+ } else {
+ regions
+ }
+ }
+}
+
+#[async_trait]
+impl HeartbeatHandler for RegionLeaseHandler {
+ fn is_acceptable(&self, role: Role) -> bool {
+ role == Role::Datanode
+ }
+
+ async fn handle(
+ &self,
+ req: &HeartbeatRequest,
+ _: &mut Context,
+ acc: &mut HeartbeatAccumulator,
+ ) -> Result<()> {
+ let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
+
+ let mut datanode_regions = HashMap::new();
+ stat.region_stats.iter().for_each(|x| {
+ let key = TableGlobalKey {
+ catalog_name: x.catalog.to_string(),
+ schema_name: x.schema.to_string(),
+ table_name: x.table.to_string(),
+ };
+ datanode_regions
+ .entry(key)
+ .or_insert_with(Vec::new)
+ .push(table::engine::region_number(x.id));
+ });
+
+ // TODO(LFC): Retrieve table global values from some cache here.
+ let table_global_values = table_routes::batch_get_table_global_value(
+ &self.kv_store,
+ datanode_regions.keys().collect::<Vec<_>>(),
+ )
+ .await?;
+
+ let mut region_leases = Vec::with_capacity(datanode_regions.len());
+ for (table_global_key, local_regions) in datanode_regions {
+ let Some(Some(table_global_value)) = table_global_values.get(&table_global_key) else { continue };
+
+ let Some(global_regions) = table_global_value.regions_id_map.get(&stat.id) else { continue };
+
+            // Keep only the regions that the table global metadata designates for the given table on the given Datanode.
+ let designated_regions = local_regions
+ .into_iter()
+ .filter(|x| global_regions.contains(x))
+ .collect::<Vec<_>>();
+
+ let table_ident = TableIdent {
+ catalog: table_global_key.catalog_name.to_string(),
+ schema: table_global_key.schema_name.to_string(),
+ table: table_global_key.table_name.to_string(),
+ table_id: table_global_value.table_id(),
+ engine: table_global_value.engine().to_string(),
+ };
+ let designated_regions =
+ self.filter_failover_regions(stat.cluster_id, &table_ident, designated_regions);
+
+ region_leases.push(RegionLease {
+ table_ident: Some(table_ident.into()),
+ regions: designated_regions,
+ duration_since_epoch: req.duration_since_epoch,
+ lease_seconds: REGION_LEASE_SECONDS,
+ });
+ }
+ acc.region_leases = region_leases;
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+
+ use super::*;
+ use crate::handler::node_stat::{RegionStat, Stat};
+ use crate::metasrv::builder::MetaSrvBuilder;
+ use crate::test_util;
+
+ #[tokio::test]
+ async fn test_handle_region_lease() {
+ let region_failover_manager = test_util::create_region_failover_manager();
+ let kv_store = region_failover_manager
+ .create_context()
+ .selector_ctx
+ .kv_store
+ .clone();
+
+ let table_name = "my_table";
+ let _ = table_routes::tests::prepare_table_global_value(&kv_store, table_name).await;
+
+ let table_ident = TableIdent {
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ table: table_name.to_string(),
+ table_id: 1,
+ engine: "mito".to_string(),
+ };
+ region_failover_manager
+ .running_procedures()
+ .write()
+ .unwrap()
+ .insert(RegionFailoverKey {
+ cluster_id: 1,
+ table_ident: table_ident.clone(),
+ region_number: 1,
+ });
+
+ let handler = RegionLeaseHandler::new(kv_store, Some(region_failover_manager));
+
+ let req = HeartbeatRequest {
+ duration_since_epoch: 1234,
+ ..Default::default()
+ };
+
+ let builder = MetaSrvBuilder::new();
+ let metasrv = builder.build().await.unwrap();
+ let ctx = &mut metasrv.new_ctx();
+
+ let acc = &mut HeartbeatAccumulator::default();
+ let new_region_stat = |region_id: u64| -> RegionStat {
+ RegionStat {
+ id: region_id,
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ table: table_name.to_string(),
+ ..Default::default()
+ }
+ };
+ acc.stat = Some(Stat {
+ cluster_id: 1,
+ id: 1,
+ region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)],
+ ..Default::default()
+ });
+
+ handler.handle(&req, ctx, acc).await.unwrap();
+
+        // Region 1 is in failover and region 3 is not in the table global value,
+ // so only region 2's lease is extended.
+ assert_eq!(acc.region_leases.len(), 1);
+ let lease = acc.region_leases.remove(0);
+ assert_eq!(lease.table_ident.unwrap(), table_ident.into());
+ assert_eq!(lease.regions, vec![2]);
+ assert_eq!(lease.duration_since_epoch, 1234);
+ assert_eq!(lease.lease_seconds, REGION_LEASE_SECONDS);
+ }
+}
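A worked breakdown of the 20-second lease constant above, assuming the stated 5-second heartbeat interval and 2-second round trip; the named constants are for illustration only, not configuration.

// Illustrative constants matching the doc comment on REGION_LEASE_SECONDS.
const HEARTBEAT_INTERVAL_SECS: u64 = 5;
const ROUND_TRIP_SECS: u64 = 2;
const EXTRA_BUFFER_SECS: u64 = 2;

// Two heartbeat intervals + two (doubled) round trips + buffer.
const REGION_LEASE_SECONDS: u64 =
    HEARTBEAT_INTERVAL_SECS * 2 + ROUND_TRIP_SECS * 2 * 2 + EXTRA_BUFFER_SECS;

fn main() {
    // 10 + 8 + 2 = 20
    assert_eq!(REGION_LEASE_SECONDS, 20);
}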
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index 155791161edc..f10bae9a6939 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -20,6 +20,7 @@ use common_procedure::local::{LocalManager, ManagerConfig};
use crate::cluster::MetaPeerClient;
use crate::error::Result;
use crate::handler::mailbox_handler::MailboxHandler;
+use crate::handler::region_lease_handler::RegionLeaseHandler;
use crate::handler::{
CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, HeartbeatMailbox,
KeepLeaseHandler, OnLeaderStartHandler, PersistStatsHandler, Pushers, RegionFailureHandler,
@@ -170,6 +171,13 @@ impl MetaSrvBuilder {
)
};
+ let region_lease_handler = RegionLeaseHandler::new(
+ kv_store.clone(),
+ region_failover_handler
+ .as_ref()
+ .map(|x| x.region_failover_manager().clone()),
+ );
+
let group = HeartbeatHandlerGroup::new(pushers);
let keep_lease_handler = KeepLeaseHandler::new(kv_store.clone());
group.add_handler(ResponseHeaderHandler::default()).await;
@@ -184,6 +192,7 @@ impl MetaSrvBuilder {
if let Some(region_failover_handler) = region_failover_handler {
group.add_handler(region_failover_handler).await;
}
+ group.add_handler(region_lease_handler).await;
group.add_handler(PersistStatsHandler::default()).await;
group
}
diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs
index 7424dc613916..828f69eb5469 100644
--- a/src/meta-srv/src/procedure/region_failover.rs
+++ b/src/meta-srv/src/procedure/region_failover.rs
@@ -21,12 +21,13 @@ mod update_metadata;
use std::collections::HashSet;
use std::fmt::Debug;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, RwLock};
use std::time::Duration;
use async_trait::async_trait;
use catalog::helper::TableGlobalKey;
-use common_meta::RegionIdent;
+use common_meta::ident::TableIdent;
+use common_meta::{ClusterId, RegionIdent};
use common_procedure::error::{
Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
};
@@ -38,6 +39,7 @@ use common_telemetry::{error, info, warn};
use failover_start::RegionFailoverStart;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
+use store_api::storage::RegionNumber;
use crate::error::{Error, RegisterProcedureLoaderSnafu, Result};
use crate::lock::DistLockRef;
@@ -48,26 +50,41 @@ use crate::service::store::ext::KvStoreExt;
const OPEN_REGION_MESSAGE_TIMEOUT: Duration = Duration::from_secs(30);
const CLOSE_REGION_MESSAGE_TIMEOUT: Duration = Duration::from_secs(2);
+/// A key for preventing multiple failover procedures from running concurrently for the same region.
+#[derive(PartialEq, Eq, Hash, Clone)]
+pub(crate) struct RegionFailoverKey {
+ pub(crate) cluster_id: ClusterId,
+ pub(crate) table_ident: TableIdent,
+ pub(crate) region_number: RegionNumber,
+}
+
+impl From<RegionIdent> for RegionFailoverKey {
+ fn from(region_ident: RegionIdent) -> Self {
+ Self {
+ cluster_id: region_ident.cluster_id,
+ table_ident: region_ident.table_ident,
+ region_number: region_ident.region_number,
+ }
+ }
+}
+
pub(crate) struct RegionFailoverManager {
mailbox: MailboxRef,
procedure_manager: ProcedureManagerRef,
selector: SelectorRef,
selector_ctx: SelectorContext,
dist_lock: DistLockRef,
- running_procedures: Arc<Mutex<HashSet<RegionIdent>>>,
+ running_procedures: Arc<RwLock<HashSet<RegionFailoverKey>>>,
}
-struct FailoverProcedureGuard<'a> {
- running_procedures: Arc<Mutex<HashSet<RegionIdent>>>,
- failed_region: &'a RegionIdent,
+struct FailoverProcedureGuard {
+ running_procedures: Arc<RwLock<HashSet<RegionFailoverKey>>>,
+ key: RegionFailoverKey,
}
-impl Drop for FailoverProcedureGuard<'_> {
+impl Drop for FailoverProcedureGuard {
fn drop(&mut self) {
- self.running_procedures
- .lock()
- .unwrap()
- .remove(self.failed_region);
+ self.running_procedures.write().unwrap().remove(&self.key);
}
}
@@ -85,11 +102,11 @@ impl RegionFailoverManager {
selector,
selector_ctx,
dist_lock,
- running_procedures: Arc::new(Mutex::new(HashSet::new())),
+ running_procedures: Arc::new(RwLock::new(HashSet::new())),
}
}
- fn create_context(&self) -> RegionFailoverContext {
+ pub(crate) fn create_context(&self) -> RegionFailoverContext {
RegionFailoverContext {
mailbox: self.mailbox.clone(),
selector: self.selector.clone(),
@@ -113,19 +130,36 @@ impl RegionFailoverManager {
})
}
- fn insert_running_procedures(&self, failed_region: &RegionIdent) -> bool {
- let mut procedures = self.running_procedures.lock().unwrap();
- if procedures.contains(failed_region) {
- return false;
+ pub(crate) fn is_region_failover_running(&self, key: &RegionFailoverKey) -> bool {
+ self.running_procedures.read().unwrap().contains(key)
+ }
+
+ fn insert_running_procedures(
+ &self,
+ failed_region: &RegionIdent,
+ ) -> Option<FailoverProcedureGuard> {
+ let key = RegionFailoverKey::from(failed_region.clone());
+ let mut procedures = self.running_procedures.write().unwrap();
+ if procedures.insert(key.clone()) {
+ Some(FailoverProcedureGuard {
+ running_procedures: self.running_procedures.clone(),
+ key,
+ })
+ } else {
+ None
}
- procedures.insert(failed_region.clone())
+ }
+
+ #[cfg(test)]
+ pub(crate) fn running_procedures(&self) -> Arc<RwLock<HashSet<RegionFailoverKey>>> {
+ self.running_procedures.clone()
}
pub(crate) async fn do_region_failover(&self, failed_region: &RegionIdent) -> Result<()> {
- if !self.insert_running_procedures(failed_region) {
+ let Some(guard) = self.insert_running_procedures(failed_region) else {
warn!("Region failover procedure for region {failed_region} is already running!");
return Ok(());
- }
+ };
if !self.table_exists(failed_region).await? {
// The table could be dropped before the failure detector knows it. Then the region
@@ -142,13 +176,9 @@ impl RegionFailoverManager {
info!("Starting region failover procedure {procedure_id} for region {failed_region:?}");
let procedure_manager = self.procedure_manager.clone();
- let running_procedures = self.running_procedures.clone();
let failed_region = failed_region.clone();
common_runtime::spawn_bg(async move {
- let _guard = FailoverProcedureGuard {
- running_procedures,
- failed_region: &failed_region,
- };
+ let _ = guard;
let watcher = &mut match procedure_manager.submit(procedure_with_id).await {
Ok(watcher) => watcher,
@@ -178,7 +208,7 @@ impl RegionFailoverManager {
let table_global_value = self
.selector_ctx
.kv_store
- .get(table_global_key.to_string().into_bytes())
+ .get(table_global_key.to_raw_key())
.await?;
Ok(table_global_value.is_some())
}
@@ -232,7 +262,8 @@ trait State: Sync + Send + Debug {
/// │ │ │
/// └─────────┘ │ Sends "Close Region" request
/// │ to the failed Datanode, and
-/// ┌─────────┐ │ wait for 2 seconds
+/// | wait for the Region lease expiry
+/// ┌─────────┐ │ seconds
/// │ │ │
/// │ ┌──▼────▼──────┐
/// Wait candidate │ │ActivateRegion◄───────────────────────┐
@@ -260,7 +291,6 @@ trait State: Sync + Send + Debug {
/// │ Broadcast Invalidate Table
/// │ Cache
/// │
-/// │
/// ┌────────▼────────┐
/// │RegionFailoverEnd│
/// └─────────────────┘
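A standalone sketch of the deduplication pattern `RegionFailoverManager` uses above: inserting the key into a shared set succeeds at most once, and the returned RAII guard removes it again on drop. The tuple key here is a stand-in for `RegionFailoverKey`.

use std::collections::HashSet;
use std::sync::{Arc, RwLock};

// Stand-in key: (cluster id, region number).
type Key = (u64, u32);

struct Guard {
    running: Arc<RwLock<HashSet<Key>>>,
    key: Key,
}

impl Drop for Guard {
    fn drop(&mut self) {
        // Releasing the guard makes a new failover for this region possible again.
        self.running.write().unwrap().remove(&self.key);
    }
}

fn try_start(running: &Arc<RwLock<HashSet<Key>>>, key: Key) -> Option<Guard> {
    // `insert` returns false if the key is already present, i.e. a procedure is running.
    if running.write().unwrap().insert(key) {
        Some(Guard { running: running.clone(), key })
    } else {
        None
    }
}

fn main() {
    let running = Arc::new(RwLock::new(HashSet::new()));
    let guard = try_start(&running, (1, 42)).expect("first start succeeds");
    assert!(try_start(&running, (1, 42)).is_none()); // duplicate is rejected
    drop(guard);
    assert!(try_start(&running, (1, 42)).is_some()); // allowed again after the guard is dropped
}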
diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
index b24e188c05fa..15ea43625582 100644
--- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
+++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
@@ -28,6 +28,7 @@ use super::{RegionFailoverContext, State};
use crate::error::{
Error, Result, RetryLaterSnafu, SerializeToJsonSnafu, UnexpectedInstructionReplySnafu,
};
+use crate::handler::region_lease_handler::REGION_LEASE_SECONDS;
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_failover::CLOSE_REGION_MESSAGE_TIMEOUT;
use crate::service::mailbox::{Channel, MailboxReceiver};
@@ -35,11 +36,15 @@ use crate::service::mailbox::{Channel, MailboxReceiver};
#[derive(Serialize, Deserialize, Debug)]
pub(super) struct DeactivateRegion {
candidate: Peer,
+ region_lease_expiry_seconds: u64,
}
impl DeactivateRegion {
pub(super) fn new(candidate: Peer) -> Self {
- Self { candidate }
+ Self {
+ candidate,
+ region_lease_expiry_seconds: REGION_LEASE_SECONDS * 2,
+ }
}
async fn send_close_region_message(
@@ -95,15 +100,21 @@ impl DeactivateRegion {
}
Err(e) if matches!(e, Error::MailboxTimeout { .. }) => {
// Since we are in a region failover situation, the Datanode that the failed region
- // resides might be unreachable. So region deactivation is happened in a "try our
- // best" effort, do not retry if mailbox received timeout.
- // However, if the region failover procedure is also used in a planned maintenance
- // situation in the future, a proper retry is a must.
+ // resides might be unreachable. So we wait for the region lease to expire. The
+ // region would be closed by its own [RegionAliveKeeper].
+ self.wait_for_region_lease_expiry().await;
Ok(Box::new(ActivateRegion::new(self.candidate)))
}
Err(e) => Err(e),
}
}
+
+    /// Sleep for `region_lease_expiry_seconds` to make sure the region is closed (by its
+    /// region alive keeper). This is critical to ensure the region is not opened on multiple
+    /// Datanodes simultaneously.
+ async fn wait_for_region_lease_expiry(&self) {
+ tokio::time::sleep(Duration::from_secs(self.region_lease_expiry_seconds)).await;
+ }
}
#[async_trait]
@@ -120,8 +131,8 @@ impl State for DeactivateRegion {
let mailbox_receiver = match result {
Ok(mailbox_receiver) => mailbox_receiver,
Err(e) if matches!(e, Error::PusherNotFound { .. }) => {
- // The Datanode could be unreachable and deregistered from pushers,
- // so simply advancing to the next state here.
+                // See the comments above about the mailbox receive-timeout situation.
+ self.wait_for_region_lease_expiry().await;
return Ok(Box::new(ActivateRegion::new(self.candidate)));
}
Err(e) => return Err(e),
@@ -212,7 +223,10 @@ mod tests {
let mut env = TestingEnvBuilder::new().build().await;
let failed_region = env.failed_region(1).await;
- let state = DeactivateRegion::new(Peer::new(2, ""));
+ let state = DeactivateRegion {
+ candidate: Peer::new(2, ""),
+ region_lease_expiry_seconds: 2,
+ };
let mailbox_receiver = state
.send_close_region_message(&env.context, &failed_region, Duration::from_millis(100))
.await
diff --git a/src/meta-srv/src/table_routes.rs b/src/meta-srv/src/table_routes.rs
index 39b5ed28a303..35e1a8a02b33 100644
--- a/src/meta-srv/src/table_routes.rs
+++ b/src/meta-srv/src/table_routes.rs
@@ -12,13 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+
use api::v1::meta::{PutRequest, TableRouteValue};
use catalog::helper::{TableGlobalKey, TableGlobalValue};
use common_meta::key::TableRouteKey;
+use common_meta::rpc::store::{BatchGetRequest, BatchGetResponse};
use snafu::{OptionExt, ResultExt};
use crate::error::{
- DecodeTableRouteSnafu, InvalidCatalogValueSnafu, Result, TableRouteNotFoundSnafu,
+ ConvertProtoDataSnafu, DecodeTableRouteSnafu, InvalidCatalogValueSnafu, Result,
+ TableRouteNotFoundSnafu,
};
use crate::service::store::ext::KvStoreExt;
use crate::service::store::kv::KvStoreRef;
@@ -27,12 +31,40 @@ pub async fn get_table_global_value(
kv_store: &KvStoreRef,
key: &TableGlobalKey,
) -> Result<Option<TableGlobalValue>> {
- let key = key.to_string().into_bytes();
- let kv = kv_store.get(key).await?;
+ let kv = kv_store.get(key.to_raw_key()).await?;
kv.map(|kv| TableGlobalValue::from_bytes(kv.value).context(InvalidCatalogValueSnafu))
.transpose()
}
+pub(crate) async fn batch_get_table_global_value(
+ kv_store: &KvStoreRef,
+ keys: Vec<&TableGlobalKey>,
+) -> Result<HashMap<TableGlobalKey, Option<TableGlobalValue>>> {
+ let req = BatchGetRequest {
+ keys: keys.iter().map(|x| x.to_raw_key()).collect::<Vec<_>>(),
+ };
+ let mut resp: BatchGetResponse = kv_store
+ .batch_get(req.into())
+ .await?
+ .try_into()
+ .context(ConvertProtoDataSnafu)?;
+
+ let kvs = resp.take_kvs();
+ let mut result = HashMap::with_capacity(kvs.len());
+ for kv in kvs {
+ let key = TableGlobalKey::try_from_raw_key(kv.key()).context(InvalidCatalogValueSnafu)?;
+ let value = TableGlobalValue::from_bytes(kv.value()).context(InvalidCatalogValueSnafu)?;
+ result.insert(key, Some(value));
+ }
+
+ for key in keys {
+ if !result.contains_key(key) {
+ result.insert(key.clone(), None);
+ }
+ }
+ Ok(result)
+}
+
pub(crate) async fn put_table_global_value(
kv_store: &KvStoreRef,
key: &TableGlobalKey,
@@ -40,7 +72,7 @@ pub(crate) async fn put_table_global_value(
) -> Result<()> {
let req = PutRequest {
header: None,
- key: key.to_string().into_bytes(),
+ key: key.to_raw_key(),
value: value.as_bytes().context(InvalidCatalogValueSnafu)?,
prev_kv: false,
};
@@ -228,12 +260,12 @@ pub(crate) mod tests {
async fn test_put_and_get_table_global_value() {
let kv_store = Arc::new(MemStore::new()) as _;
- let key = TableGlobalKey {
+ let not_exist_key = TableGlobalKey {
catalog_name: "not_exist_catalog".to_string(),
schema_name: "not_exist_schema".to_string(),
table_name: "not_exist_table".to_string(),
};
- assert!(get_table_global_value(&kv_store, &key)
+ assert!(get_table_global_value(&kv_store, ¬_exist_key)
.await
.unwrap()
.is_none());
@@ -244,6 +276,12 @@ pub(crate) mod tests {
.unwrap()
.unwrap();
assert_eq!(actual, value);
+
+ let keys = vec![¬_exist_key, &key];
+ let result = batch_get_table_global_value(&kv_store, keys).await.unwrap();
+ assert_eq!(result.len(), 2);
+ assert!(result.get(¬_exist_key).unwrap().is_none());
+ assert_eq!(result.get(&key).unwrap().as_ref().unwrap(), &value);
}
#[tokio::test]
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
index 68c0ca1a50ad..d0fc69cf212f 100644
--- a/src/table/src/engine.rs
+++ b/src/table/src/engine.rs
@@ -28,7 +28,7 @@ use crate::TableRef;
pub mod manager;
/// Represents a resolved path to a table of the form “catalog.schema.table”
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq, Hash)]
pub struct TableReference<'a> {
pub catalog: &'a str,
pub schema: &'a str,
|
feat
|
extend region leases in Metasrv (#1784)
|
422d18da8bbdaba3b3a9b93bea6ef9bc3b76ab2f
|
2024-12-19 09:12:05
|
Ruihang Xia
|
feat: bump opendal and switch prometheus layer to the upstream impl (#5179)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index a0225cf27dbe..fa8ba34d1a3b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -866,18 +866,6 @@ dependencies = [
"rand",
]
-[[package]]
-name = "backon"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0"
-dependencies = [
- "fastrand",
- "futures-core",
- "pin-project",
- "tokio",
-]
-
[[package]]
name = "backon"
version = "1.2.0"
@@ -2228,7 +2216,7 @@ version = "0.12.0"
dependencies = [
"async-stream",
"async-trait",
- "backon 1.2.0",
+ "backon",
"common-base",
"common-error",
"common-macro",
@@ -7386,13 +7374,13 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
[[package]]
name = "opendal"
-version = "0.49.2"
+version = "0.50.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b04d09b9822c2f75a1d2fc513a2c1279c70e91e7407936fffdf6a6976ec530a"
+checksum = "cb28bb6c64e116ceaf8dd4e87099d3cfea4a58e85e62b104fef74c91afba0f44"
dependencies = [
"anyhow",
"async-trait",
- "backon 0.4.4",
+ "backon",
"base64 0.22.1",
"bytes",
"chrono",
@@ -7405,6 +7393,7 @@ dependencies = [
"md-5",
"once_cell",
"percent-encoding",
+ "prometheus",
"quick-xml 0.36.2",
"reqsign",
"reqwest",
@@ -9387,9 +9376,9 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.16.0"
+version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03dd4ba7c3901dd43e6b8c7446a760d45bc1ea4301002e1a6fa48f97c3a796fa"
+checksum = "eb0075a66c8bfbf4cc8b70dca166e722e1f55a3ea9250ecbb85f4d92a5f64149"
dependencies = [
"anyhow",
"async-trait",
diff --git a/src/common/datasource/src/object_store/fs.rs b/src/common/datasource/src/object_store/fs.rs
index f87311f517b7..5ffbbfa3148a 100644
--- a/src/common/datasource/src/object_store/fs.rs
+++ b/src/common/datasource/src/object_store/fs.rs
@@ -27,7 +27,7 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
DefaultLoggingInterceptor,
))
.layer(object_store::layers::TracingLayer)
- .layer(object_store::layers::PrometheusMetricsLayer::new(true))
+ .layer(object_store::layers::build_prometheus_metrics_layer(true))
.finish();
Ok(object_store)
}
diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs
index e141621b899b..0d83eb7a98b8 100644
--- a/src/common/datasource/src/object_store/s3.rs
+++ b/src/common/datasource/src/object_store/s3.rs
@@ -89,7 +89,7 @@ pub fn build_s3_backend(
DefaultLoggingInterceptor,
))
.layer(object_store::layers::TracingLayer)
- .layer(object_store::layers::PrometheusMetricsLayer::new(true))
+ .layer(object_store::layers::build_prometheus_metrics_layer(true))
.finish())
}
diff --git a/src/common/procedure/src/local/runner.rs b/src/common/procedure/src/local/runner.rs
index c2d15001fba3..bf277a0e72e5 100644
--- a/src/common/procedure/src/local/runner.rs
+++ b/src/common/procedure/src/local/runner.rs
@@ -544,7 +544,7 @@ mod tests {
use common_test_util::temp_dir::create_temp_dir;
use futures_util::future::BoxFuture;
use futures_util::FutureExt;
- use object_store::ObjectStore;
+ use object_store::{EntryMode, ObjectStore};
use tokio::sync::mpsc;
use super::*;
@@ -578,7 +578,11 @@ mod tests {
) {
let dir = proc_path!(procedure_store, "{procedure_id}/");
let lister = object_store.list(&dir).await.unwrap();
- let mut files_in_dir: Vec<_> = lister.into_iter().map(|de| de.name().to_string()).collect();
+ let mut files_in_dir: Vec<_> = lister
+ .into_iter()
+ .filter(|x| x.metadata().mode() == EntryMode::FILE)
+ .map(|de| de.name().to_string())
+ .collect();
files_in_dir.sort_unstable();
assert_eq!(files, files_in_dir);
}
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 9fbd46e16009..61a4eae12883 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -193,6 +193,14 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to build http client"))]
+ BuildHttpClient {
+ #[snafu(implicit)]
+ location: Location,
+ #[snafu(source)]
+ error: reqwest::Error,
+ },
+
#[snafu(display("Missing required field: {}", name))]
MissingRequiredField {
name: String,
@@ -406,9 +414,10 @@ impl ErrorExt for Error {
| MissingKvBackend { .. }
| TomlFormat { .. } => StatusCode::InvalidArguments,
- PayloadNotExist { .. } | Unexpected { .. } | WatchAsyncTaskChange { .. } => {
- StatusCode::Unexpected
- }
+ PayloadNotExist { .. }
+ | Unexpected { .. }
+ | WatchAsyncTaskChange { .. }
+ | BuildHttpClient { .. } => StatusCode::Unexpected,
AsyncTaskExecute { source, .. } => source.status_code(),
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index c78afe448e0c..52a1cba982e1 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -32,7 +32,7 @@ use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder, O
use snafu::prelude::*;
use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
-use crate::error::{self, CreateDirSnafu, Result};
+use crate::error::{self, BuildHttpClientSnafu, CreateDirSnafu, Result};
pub(crate) async fn new_raw_object_store(
store: &ObjectStoreConfig,
@@ -236,7 +236,8 @@ pub(crate) fn build_http_client(config: &HttpClientConfig) -> Result<HttpClient>
builder.timeout(config.timeout)
};
- HttpClient::build(http_builder).context(error::InitBackendSnafu)
+ let client = http_builder.build().context(BuildHttpClientSnafu)?;
+ Ok(HttpClient::with(client))
}
struct PrintDetailedError;
diff --git a/src/file-engine/src/manifest.rs b/src/file-engine/src/manifest.rs
index 6310c3ccb912..6bf5ee104ba2 100644
--- a/src/file-engine/src/manifest.rs
+++ b/src/file-engine/src/manifest.rs
@@ -46,7 +46,7 @@ impl FileRegionManifest {
pub async fn store(&self, region_dir: &str, object_store: &ObjectStore) -> Result<()> {
let path = ®ion_manifest_path(region_dir);
let exist = object_store
- .is_exist(path)
+ .exists(path)
.await
.context(CheckObjectSnafu { path })?;
ensure!(!exist, ManifestExistsSnafu { path });
diff --git a/src/file-engine/src/region.rs b/src/file-engine/src/region.rs
index a5af6822285e..673d352b1e63 100644
--- a/src/file-engine/src/region.rs
+++ b/src/file-engine/src/region.rs
@@ -130,7 +130,7 @@ mod tests {
assert_eq!(region.metadata.primary_key, vec![1]);
assert!(object_store
- .is_exist("create_region_dir/manifest/_file_manifest")
+ .exists("create_region_dir/manifest/_file_manifest")
.await
.unwrap());
@@ -198,13 +198,13 @@ mod tests {
.unwrap();
assert!(object_store
- .is_exist("drop_region_dir/manifest/_file_manifest")
+ .exists("drop_region_dir/manifest/_file_manifest")
.await
.unwrap());
FileRegion::drop(®ion, &object_store).await.unwrap();
assert!(!object_store
- .is_exist("drop_region_dir/manifest/_file_manifest")
+ .exists("drop_region_dir/manifest/_file_manifest")
.await
.unwrap());
diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs
index c5f7a2b4a32c..d0f8cf5028e6 100644
--- a/src/metric-engine/src/test_util.rs
+++ b/src/metric-engine/src/test_util.rs
@@ -313,12 +313,12 @@ mod test {
let region_dir = "test_metric_region";
// assert metadata region's dir
let metadata_region_dir = join_dir(region_dir, METADATA_REGION_SUBDIR);
- let exist = object_store.is_exist(&metadata_region_dir).await.unwrap();
+ let exist = object_store.exists(&metadata_region_dir).await.unwrap();
assert!(exist);
// assert data region's dir
let data_region_dir = join_dir(region_dir, DATA_REGION_SUBDIR);
- let exist = object_store.is_exist(&data_region_dir).await.unwrap();
+ let exist = object_store.exists(&data_region_dir).await.unwrap();
assert!(exist);
// check mito engine
diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs
index 9e5742ca0410..eb112530cad7 100644
--- a/src/mito2/src/cache/file_cache.rs
+++ b/src/mito2/src/cache/file_cache.rs
@@ -286,7 +286,7 @@ impl FileCache {
}
async fn get_reader(&self, file_path: &str) -> object_store::Result<Option<Reader>> {
- if self.local_store.is_exist(file_path).await? {
+ if self.local_store.exists(file_path).await? {
Ok(Some(self.local_store.reader(file_path).await?))
} else {
Ok(None)
@@ -480,7 +480,7 @@ mod tests {
cache.memory_index.run_pending_tasks().await;
// The file also not exists.
- assert!(!local_store.is_exist(&file_path).await.unwrap());
+ assert!(!local_store.exists(&file_path).await.unwrap());
assert_eq!(0, cache.memory_index.weighted_size());
}
diff --git a/src/mito2/src/engine/create_test.rs b/src/mito2/src/engine/create_test.rs
index 48b04dc86d91..4bcc55934034 100644
--- a/src/mito2/src/engine/create_test.rs
+++ b/src/mito2/src/engine/create_test.rs
@@ -192,12 +192,12 @@ async fn test_engine_create_with_custom_store() {
assert!(object_store_manager
.find("Gcs")
.unwrap()
- .is_exist(region_dir)
+ .exists(region_dir)
.await
.unwrap());
assert!(!object_store_manager
.default_object_store()
- .is_exist(region_dir)
+ .exists(region_dir)
.await
.unwrap());
}
diff --git a/src/mito2/src/engine/drop_test.rs b/src/mito2/src/engine/drop_test.rs
index 7d719f778be9..5d0c5afbf06e 100644
--- a/src/mito2/src/engine/drop_test.rs
+++ b/src/mito2/src/engine/drop_test.rs
@@ -71,7 +71,7 @@ async fn test_engine_drop_region() {
assert!(!env
.get_object_store()
.unwrap()
- .is_exist(&join_path(&region_dir, DROPPING_MARKER_FILE))
+ .exists(&join_path(&region_dir, DROPPING_MARKER_FILE))
.await
.unwrap());
@@ -93,7 +93,7 @@ async fn test_engine_drop_region() {
listener.wait().await;
let object_store = env.get_object_store().unwrap();
- assert!(!object_store.is_exist(&region_dir).await.unwrap());
+ assert!(!object_store.exists(&region_dir).await.unwrap());
}
#[tokio::test]
@@ -167,13 +167,13 @@ async fn test_engine_drop_region_for_custom_store() {
assert!(object_store_manager
.find("Gcs")
.unwrap()
- .is_exist(&custom_region_dir)
+ .exists(&custom_region_dir)
.await
.unwrap());
assert!(object_store_manager
.find("default")
.unwrap()
- .is_exist(&global_region_dir)
+ .exists(&global_region_dir)
.await
.unwrap());
@@ -190,13 +190,13 @@ async fn test_engine_drop_region_for_custom_store() {
assert!(!object_store_manager
.find("Gcs")
.unwrap()
- .is_exist(&custom_region_dir)
+ .exists(&custom_region_dir)
.await
.unwrap());
assert!(object_store_manager
.find("default")
.unwrap()
- .is_exist(&global_region_dir)
+ .exists(&global_region_dir)
.await
.unwrap());
}
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index 6752bbd04b12..a3b51514c287 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -228,13 +228,13 @@ async fn test_engine_region_open_with_custom_store() {
let object_store_manager = env.get_object_store_manager().unwrap();
assert!(!object_store_manager
.default_object_store()
- .is_exist(region.access_layer.region_dir())
+ .exists(region.access_layer.region_dir())
.await
.unwrap());
assert!(object_store_manager
.find("Gcs")
.unwrap()
- .is_exist(region.access_layer.region_dir())
+ .exists(region.access_layer.region_dir())
.await
.unwrap());
}
diff --git a/src/mito2/src/manifest/tests/checkpoint.rs b/src/mito2/src/manifest/tests/checkpoint.rs
index 692f40422b17..6f2c92bc5e09 100644
--- a/src/mito2/src/manifest/tests/checkpoint.rs
+++ b/src/mito2/src/manifest/tests/checkpoint.rs
@@ -84,6 +84,7 @@ async fn manager_without_checkpoint() {
// check files
let mut expected = vec![
+ "/",
"00000000000000000010.json",
"00000000000000000009.json",
"00000000000000000008.json",
@@ -130,6 +131,7 @@ async fn manager_with_checkpoint_distance_1() {
// check files
let mut expected = vec![
+ "/",
"00000000000000000009.checkpoint",
"00000000000000000010.checkpoint",
"00000000000000000010.json",
diff --git a/src/mito2/src/sst/file_purger.rs b/src/mito2/src/sst/file_purger.rs
index 76c7a7150328..81251c91a564 100644
--- a/src/mito2/src/sst/file_purger.rs
+++ b/src/mito2/src/sst/file_purger.rs
@@ -185,7 +185,7 @@ mod tests {
scheduler.stop(true).await.unwrap();
- assert!(!object_store.is_exist(&path).await.unwrap());
+ assert!(!object_store.exists(&path).await.unwrap());
}
#[tokio::test]
@@ -247,7 +247,7 @@ mod tests {
scheduler.stop(true).await.unwrap();
- assert!(!object_store.is_exist(&path).await.unwrap());
- assert!(!object_store.is_exist(&index_path).await.unwrap());
+ assert!(!object_store.exists(&path).await.unwrap());
+ assert!(!object_store.exists(&index_path).await.unwrap());
}
}
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index d4a13a134597..01eaf1765224 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -51,7 +51,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
// Check if this region is pending drop. And clean the entire dir if so.
if !self.dropping_regions.is_region_exists(region_id)
&& object_store
- .is_exist(&join_path(&request.region_dir, DROPPING_MARKER_FILE))
+ .exists(&join_path(&request.region_dir, DROPPING_MARKER_FILE))
.await
.context(OpenDalSnafu)?
{
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index 72e0e2bfbe46..b82be7376a72 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -17,8 +17,9 @@ futures.workspace = true
lazy_static.workspace = true
md5 = "0.7"
moka = { workspace = true, features = ["future"] }
-opendal = { version = "0.49", features = [
+opendal = { version = "0.50", features = [
"layers-tracing",
+ "layers-prometheus",
"services-azblob",
"services-fs",
"services-gcs",
diff --git a/src/object-store/src/layers.rs b/src/object-store/src/layers.rs
index b2145aa6b0e5..20108ab63c52 100644
--- a/src/object-store/src/layers.rs
+++ b/src/object-store/src/layers.rs
@@ -13,8 +13,37 @@
// limitations under the License.
mod lru_cache;
-mod prometheus;
pub use lru_cache::*;
pub use opendal::layers::*;
-pub use prometheus::PrometheusMetricsLayer;
+pub use prometheus::build_prometheus_metrics_layer;
+
+mod prometheus {
+ use std::sync::{Mutex, OnceLock};
+
+ use opendal::layers::PrometheusLayer;
+
+ static PROMETHEUS_LAYER: OnceLock<Mutex<PrometheusLayer>> = OnceLock::new();
+
+ pub fn build_prometheus_metrics_layer(with_path_label: bool) -> PrometheusLayer {
+ PROMETHEUS_LAYER
+ .get_or_init(|| {
+ // This logical tries to extract parent path from the object storage operation
+ // the function also relies on assumption that the region path is built from
+ // pattern `<data|index>/catalog/schema/table_id/....`
+ //
+ // We'll get the data/catalog/schema from path.
+ let path_level = if with_path_label { 3 } else { 0 };
+
+ let layer = PrometheusLayer::builder()
+ .path_label(path_level)
+ .register_default()
+ .unwrap();
+
+ Mutex::new(layer)
+ })
+ .lock()
+ .unwrap()
+ .clone()
+ }
+}
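
The `OnceLock<Mutex<...>>` above exists because `register_default()` must only run once per process; every later caller gets a clone of the already-registered layer, so the first caller's `with_path_label` choice wins. A standalone sketch of that once-then-clone pattern using only the standard library and a stand-in layer type (not opendal's `PrometheusLayer`):

```rust
// Sketch of the "build and register once, clone thereafter" pattern.
use std::sync::{Mutex, OnceLock};

#[derive(Clone)]
struct Layer {
    path_level: usize,
}

static LAYER: OnceLock<Mutex<Layer>> = OnceLock::new();

fn build_layer(with_path_label: bool) -> Layer {
    LAYER
        .get_or_init(|| {
            // Only the first caller decides the label depth; later callers reuse it.
            let path_level = if with_path_label { 3 } else { 0 };
            Mutex::new(Layer { path_level })
        })
        .lock()
        .unwrap()
        .clone()
}

fn main() {
    let a = build_layer(true);
    let b = build_layer(false); // reuses the first configuration
    assert_eq!(a.path_level, 3);
    assert_eq!(b.path_level, 3);
}
```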
diff --git a/src/object-store/src/layers/lru_cache/read_cache.rs b/src/object-store/src/layers/lru_cache/read_cache.rs
index f88b36784d15..874b17280d9c 100644
--- a/src/object-store/src/layers/lru_cache/read_cache.rs
+++ b/src/object-store/src/layers/lru_cache/read_cache.rs
@@ -156,9 +156,12 @@ impl<C: Access> ReadCache<C> {
let size = entry.metadata().content_length();
OBJECT_STORE_LRU_CACHE_ENTRIES.inc();
OBJECT_STORE_LRU_CACHE_BYTES.add(size as i64);
- self.mem_cache
- .insert(read_key.to_string(), ReadResult::Success(size as u32))
- .await;
+ // ignore root path
+ if entry.path() != "/" {
+ self.mem_cache
+ .insert(read_key.to_string(), ReadResult::Success(size as u32))
+ .await;
+ }
}
Ok(self.cache_stat().await)
diff --git a/src/object-store/src/layers/prometheus.rs b/src/object-store/src/layers/prometheus.rs
deleted file mode 100644
index fef83a91468a..000000000000
--- a/src/object-store/src/layers/prometheus.rs
+++ /dev/null
@@ -1,584 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! code originally from <https://github.com/apache/incubator-opendal/blob/main/core/src/layers/prometheus.rs>, make a tiny change to avoid crash in multi thread env
-
-use std::fmt::{Debug, Formatter};
-
-use common_telemetry::debug;
-use lazy_static::lazy_static;
-use opendal::raw::*;
-use opendal::{Buffer, ErrorKind};
-use prometheus::{
- exponential_buckets, histogram_opts, register_histogram_vec, register_int_counter_vec,
- Histogram, HistogramTimer, HistogramVec, IntCounterVec,
-};
-
-use crate::util::extract_parent_path;
-
-type Result<T> = std::result::Result<T, opendal::Error>;
-
-lazy_static! {
- static ref REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
- "opendal_requests_total",
- "Total times of all kinds of operation being called",
- &["scheme", "operation", "path"],
- )
- .unwrap();
- static ref REQUESTS_DURATION_SECONDS: HistogramVec = register_histogram_vec!(
- histogram_opts!(
- "opendal_requests_duration_seconds",
- "Histogram of the time spent on specific operation",
- exponential_buckets(0.01, 2.0, 16).unwrap()
- ),
- &["scheme", "operation", "path"]
- )
- .unwrap();
- static ref BYTES_TOTAL: HistogramVec = register_histogram_vec!(
- histogram_opts!(
- "opendal_bytes_total",
- "Total size of sync or async Read/Write",
- exponential_buckets(0.01, 2.0, 16).unwrap()
- ),
- &["scheme", "operation", "path"]
- )
- .unwrap();
-}
-
-#[inline]
-fn increment_errors_total(op: Operation, kind: ErrorKind) {
- debug!(
- "Prometheus statistics metrics error, operation {} error {}",
- op.into_static(),
- kind.into_static()
- );
-}
-
-/// Please refer to [prometheus](https://docs.rs/prometheus) for every operation.
-///
-/// # Prometheus Metrics
-///
-/// In this section, we will introduce three metrics that are currently being exported by opendal. These metrics are essential for understanding the behavior and performance of opendal.
-///
-///
-/// | Metric Name | Type | Description | Labels |
-/// |-----------------------------------|-----------|------------------------------------------------------|---------------------|
-/// | opendal_requests_total | Counter | Total times of all kinds of operation being called | scheme, operation |
-/// | opendal_requests_duration_seconds | Histogram | Histogram of the time spent on specific operation | scheme, operation |
-/// | opendal_bytes_total | Histogram | Total size of sync or async Read/Write | scheme, operation |
-///
-/// For a more detailed explanation of these metrics and how they are used, please refer to the [Prometheus documentation](https://prometheus.io/docs/introduction/overview/).
-///
-/// # Histogram Configuration
-///
-/// The metric buckets for these histograms are automatically generated based on the `exponential_buckets(0.01, 2.0, 16)` configuration.
-#[derive(Default, Debug, Clone)]
-pub struct PrometheusMetricsLayer {
- pub path_label: bool,
-}
-
-impl PrometheusMetricsLayer {
- pub fn new(path_label: bool) -> Self {
- Self { path_label }
- }
-}
-
-impl<A: Access> Layer<A> for PrometheusMetricsLayer {
- type LayeredAccess = PrometheusAccess<A>;
-
- fn layer(&self, inner: A) -> Self::LayeredAccess {
- let meta = inner.info();
- let scheme = meta.scheme();
-
- PrometheusAccess {
- inner,
- scheme: scheme.to_string(),
- path_label: self.path_label,
- }
- }
-}
-
-#[derive(Clone)]
-pub struct PrometheusAccess<A: Access> {
- inner: A,
- scheme: String,
- path_label: bool,
-}
-
-impl<A: Access> PrometheusAccess<A> {
- fn get_path_label<'a>(&self, path: &'a str) -> &'a str {
- if self.path_label {
- extract_parent_path(path)
- } else {
- ""
- }
- }
-}
-
-impl<A: Access> Debug for PrometheusAccess<A> {
- fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- f.debug_struct("PrometheusAccessor")
- .field("inner", &self.inner)
- .finish_non_exhaustive()
- }
-}
-
-impl<A: Access> LayeredAccess for PrometheusAccess<A> {
- type Inner = A;
- type Reader = PrometheusMetricWrapper<A::Reader>;
- type BlockingReader = PrometheusMetricWrapper<A::BlockingReader>;
- type Writer = PrometheusMetricWrapper<A::Writer>;
- type BlockingWriter = PrometheusMetricWrapper<A::BlockingWriter>;
- type Lister = A::Lister;
- type BlockingLister = A::BlockingLister;
-
- fn inner(&self) -> &Self::Inner {
- &self.inner
- }
-
- async fn create_dir(&self, path: &str, args: OpCreateDir) -> Result<RpCreateDir> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::CreateDir.into_static(), path_label])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::CreateDir.into_static(), path_label])
- .start_timer();
- let create_res = self.inner.create_dir(path, args).await;
-
- timer.observe_duration();
- create_res.inspect_err(|e| {
- increment_errors_total(Operation::CreateDir, e.kind());
- })
- }
-
- async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::Read.into_static(), path_label])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::Read.into_static(), path_label])
- .start_timer();
-
- let (rp, r) = self.inner.read(path, args).await.inspect_err(|e| {
- increment_errors_total(Operation::Read, e.kind());
- })?;
-
- Ok((
- rp,
- PrometheusMetricWrapper::new(
- r,
- Operation::Read,
- BYTES_TOTAL.with_label_values(&[
- &self.scheme,
- Operation::Read.into_static(),
- path_label,
- ]),
- timer,
- ),
- ))
- }
-
- async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::Write.into_static(), path_label])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::Write.into_static(), path_label])
- .start_timer();
-
- let (rp, r) = self.inner.write(path, args).await.inspect_err(|e| {
- increment_errors_total(Operation::Write, e.kind());
- })?;
-
- Ok((
- rp,
- PrometheusMetricWrapper::new(
- r,
- Operation::Write,
- BYTES_TOTAL.with_label_values(&[
- &self.scheme,
- Operation::Write.into_static(),
- path_label,
- ]),
- timer,
- ),
- ))
- }
-
- async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::Stat.into_static(), path_label])
- .inc();
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::Stat.into_static(), path_label])
- .start_timer();
-
- let stat_res = self.inner.stat(path, args).await;
- timer.observe_duration();
- stat_res.inspect_err(|e| {
- increment_errors_total(Operation::Stat, e.kind());
- })
- }
-
- async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::Delete.into_static(), path_label])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::Delete.into_static(), path_label])
- .start_timer();
-
- let delete_res = self.inner.delete(path, args).await;
- timer.observe_duration();
- delete_res.inspect_err(|e| {
- increment_errors_total(Operation::Delete, e.kind());
- })
- }
-
- async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::List.into_static(), path_label])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::List.into_static(), path_label])
- .start_timer();
-
- let list_res = self.inner.list(path, args).await;
-
- timer.observe_duration();
- list_res.inspect_err(|e| {
- increment_errors_total(Operation::List, e.kind());
- })
- }
-
- async fn batch(&self, args: OpBatch) -> Result<RpBatch> {
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::Batch.into_static(), ""])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::Batch.into_static(), ""])
- .start_timer();
- let result = self.inner.batch(args).await;
-
- timer.observe_duration();
- result.inspect_err(|e| {
- increment_errors_total(Operation::Batch, e.kind());
- })
- }
-
- async fn presign(&self, path: &str, args: OpPresign) -> Result<RpPresign> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[&self.scheme, Operation::Presign.into_static(), path_label])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[&self.scheme, Operation::Presign.into_static(), path_label])
- .start_timer();
- let result = self.inner.presign(path, args).await;
- timer.observe_duration();
-
- result.inspect_err(|e| {
- increment_errors_total(Operation::Presign, e.kind());
- })
- }
-
- fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result<RpCreateDir> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingCreateDir.into_static(),
- path_label,
- ])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingCreateDir.into_static(),
- path_label,
- ])
- .start_timer();
- let result = self.inner.blocking_create_dir(path, args);
-
- timer.observe_duration();
-
- result.inspect_err(|e| {
- increment_errors_total(Operation::BlockingCreateDir, e.kind());
- })
- }
-
- fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingRead.into_static(),
- path_label,
- ])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingRead.into_static(),
- path_label,
- ])
- .start_timer();
-
- self.inner
- .blocking_read(path, args)
- .map(|(rp, r)| {
- (
- rp,
- PrometheusMetricWrapper::new(
- r,
- Operation::BlockingRead,
- BYTES_TOTAL.with_label_values(&[
- &self.scheme,
- Operation::BlockingRead.into_static(),
- path_label,
- ]),
- timer,
- ),
- )
- })
- .inspect_err(|e| {
- increment_errors_total(Operation::BlockingRead, e.kind());
- })
- }
-
- fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingWrite.into_static(),
- path_label,
- ])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingWrite.into_static(),
- path_label,
- ])
- .start_timer();
-
- self.inner
- .blocking_write(path, args)
- .map(|(rp, r)| {
- (
- rp,
- PrometheusMetricWrapper::new(
- r,
- Operation::BlockingWrite,
- BYTES_TOTAL.with_label_values(&[
- &self.scheme,
- Operation::BlockingWrite.into_static(),
- path_label,
- ]),
- timer,
- ),
- )
- })
- .inspect_err(|e| {
- increment_errors_total(Operation::BlockingWrite, e.kind());
- })
- }
-
- fn blocking_stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingStat.into_static(),
- path_label,
- ])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingStat.into_static(),
- path_label,
- ])
- .start_timer();
- let result = self.inner.blocking_stat(path, args);
- timer.observe_duration();
- result.inspect_err(|e| {
- increment_errors_total(Operation::BlockingStat, e.kind());
- })
- }
-
- fn blocking_delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingDelete.into_static(),
- path_label,
- ])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingDelete.into_static(),
- path_label,
- ])
- .start_timer();
- let result = self.inner.blocking_delete(path, args);
- timer.observe_duration();
-
- result.inspect_err(|e| {
- increment_errors_total(Operation::BlockingDelete, e.kind());
- })
- }
-
- fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> {
- let path_label = self.get_path_label(path);
- REQUESTS_TOTAL
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingList.into_static(),
- path_label,
- ])
- .inc();
-
- let timer = REQUESTS_DURATION_SECONDS
- .with_label_values(&[
- &self.scheme,
- Operation::BlockingList.into_static(),
- path_label,
- ])
- .start_timer();
- let result = self.inner.blocking_list(path, args);
- timer.observe_duration();
-
- result.inspect_err(|e| {
- increment_errors_total(Operation::BlockingList, e.kind());
- })
- }
-}
-
-pub struct PrometheusMetricWrapper<R> {
- inner: R,
-
- op: Operation,
- bytes_counter: Histogram,
- _requests_duration_timer: HistogramTimer,
- bytes: u64,
-}
-
-impl<R> Drop for PrometheusMetricWrapper<R> {
- fn drop(&mut self) {
- self.bytes_counter.observe(self.bytes as f64);
- }
-}
-
-impl<R> PrometheusMetricWrapper<R> {
- fn new(
- inner: R,
- op: Operation,
- bytes_counter: Histogram,
- requests_duration_timer: HistogramTimer,
- ) -> Self {
- Self {
- inner,
- op,
- bytes_counter,
- _requests_duration_timer: requests_duration_timer,
- bytes: 0,
- }
- }
-}
-
-impl<R: oio::Read> oio::Read for PrometheusMetricWrapper<R> {
- async fn read(&mut self) -> Result<Buffer> {
- self.inner.read().await.inspect_err(|err| {
- increment_errors_total(self.op, err.kind());
- })
- }
-}
-
-impl<R: oio::BlockingRead> oio::BlockingRead for PrometheusMetricWrapper<R> {
- fn read(&mut self) -> opendal::Result<Buffer> {
- self.inner.read().inspect_err(|err| {
- increment_errors_total(self.op, err.kind());
- })
- }
-}
-
-impl<R: oio::Write> oio::Write for PrometheusMetricWrapper<R> {
- async fn write(&mut self, bs: Buffer) -> Result<()> {
- let bytes = bs.len();
- match self.inner.write(bs).await {
- Ok(_) => {
- self.bytes += bytes as u64;
- Ok(())
- }
- Err(err) => {
- increment_errors_total(self.op, err.kind());
- Err(err)
- }
- }
- }
-
- async fn close(&mut self) -> Result<()> {
- self.inner.close().await.inspect_err(|err| {
- increment_errors_total(self.op, err.kind());
- })
- }
-
- async fn abort(&mut self) -> Result<()> {
- self.inner.close().await.inspect_err(|err| {
- increment_errors_total(self.op, err.kind());
- })
- }
-}
-
-impl<R: oio::BlockingWrite> oio::BlockingWrite for PrometheusMetricWrapper<R> {
- fn write(&mut self, bs: Buffer) -> Result<()> {
- let bytes = bs.len();
- self.inner
- .write(bs)
- .map(|_| {
- self.bytes += bytes as u64;
- })
- .inspect_err(|err| {
- increment_errors_total(self.op, err.kind());
- })
- }
-
- fn close(&mut self) -> Result<()> {
- self.inner.close().inspect_err(|err| {
- increment_errors_total(self.op, err.kind());
- })
- }
-}
diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs
index fc0a031ab953..271da33e853c 100644
--- a/src/object-store/src/util.rs
+++ b/src/object-store/src/util.rs
@@ -15,19 +15,12 @@
use std::fmt::Display;
use common_telemetry::{debug, error, trace};
-use futures::TryStreamExt;
use opendal::layers::{LoggingInterceptor, LoggingLayer, TracingLayer};
use opendal::raw::{AccessorInfo, Operation};
-use opendal::{Entry, ErrorKind, Lister};
+use opendal::ErrorKind;
-use crate::layers::PrometheusMetricsLayer;
use crate::ObjectStore;
-/// Collect all entries from the [Lister].
-pub async fn collect(stream: Lister) -> Result<Vec<Entry>, opendal::Error> {
- stream.try_collect::<Vec<_>>().await
-}
-
/// Join two paths and normalize the output dir.
///
/// The output dir is always ends with `/`. e.g.
@@ -127,26 +120,12 @@ pub fn normalize_path(path: &str) -> String {
p
}
-// This logical tries to extract parent path from the object storage operation
-// the function also relies on assumption that the region path is built from
-// pattern `<data|index>/catalog/schema/table_id/....`
-//
-// this implementation tries to extract at most 3 levels of parent path
-pub(crate) fn extract_parent_path(path: &str) -> &str {
- // split the path into `catalog`, `schema` and others
- path.char_indices()
- .filter(|&(_, c)| c == '/')
- // we get the data/catalog/schema from path, split at the 3rd /
- .nth(2)
- .map_or(path, |(i, _)| &path[..i])
-}
-
/// Attaches instrument layers to the object store.
pub fn with_instrument_layers(object_store: ObjectStore, path_label: bool) -> ObjectStore {
object_store
.layer(LoggingLayer::new(DefaultLoggingInterceptor))
.layer(TracingLayer)
- .layer(PrometheusMetricsLayer::new(path_label))
+ .layer(crate::layers::build_prometheus_metrics_layer(path_label))
}
static LOGGING_TARGET: &str = "opendal::services";
@@ -263,28 +242,4 @@ mod tests {
assert_eq!("/abc", join_path("//", "/abc"));
assert_eq!("abc/def", join_path("abc/", "//def"));
}
-
- #[test]
- fn test_path_extraction() {
- assert_eq!(
- "data/greptime/public",
- extract_parent_path("data/greptime/public/1024/1024_0000000000/")
- );
-
- assert_eq!(
- "data/greptime/public",
- extract_parent_path("data/greptime/public/1/")
- );
-
- assert_eq!(
- "data/greptime/public",
- extract_parent_path("data/greptime/public")
- );
-
- assert_eq!("data/greptime/", extract_parent_path("data/greptime/"));
-
- assert_eq!("data/", extract_parent_path("data/"));
-
- assert_eq!("/", extract_parent_path("/"));
- }
}
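
The removed `extract_parent_path` helper kept at most the first three `/`-separated segments (e.g. `data/greptime/public`) for the metric label; the upstream `PrometheusLayer::builder().path_label(3)` used above is expected to provide an equivalent label. A small sketch of that truncation logic, kept purely for illustration:

```rust
// Illustrative only: keep at most the first `n` path segments as a metric label.
fn first_n_segments(path: &str, n: usize) -> &str {
    path.char_indices()
        .filter(|&(_, c)| c == '/')
        .nth(n.saturating_sub(1))
        .map_or(path, |(i, _)| &path[..i])
}

fn main() {
    assert_eq!(
        "data/greptime/public",
        first_n_segments("data/greptime/public/1024/1024_0000000000/", 3)
    );
    assert_eq!(
        "data/greptime/public",
        first_n_segments("data/greptime/public", 3)
    );
    assert_eq!("data/", first_n_segments("data/", 3));
}
```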
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 497decffabfc..7e81b965fbed 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -65,23 +65,38 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
store.write(p3, "Hello, object3!").await?;
// List objects
- let entries = store.list("/").await?;
+ let entries = store
+ .list("/")
+ .await?
+ .into_iter()
+ .filter(|x| x.metadata().mode() == EntryMode::FILE)
+ .collect::<Vec<_>>();
assert_eq!(3, entries.len());
store.delete(p1).await?;
store.delete(p3).await?;
// List objects again
- // Only o2 is exists
- let entries = store.list("/").await?;
+ // Only o2 and root exist
+ let entries = store
+ .list("/")
+ .await?
+ .into_iter()
+ .filter(|x| x.metadata().mode() == EntryMode::FILE)
+ .collect::<Vec<_>>();
assert_eq!(1, entries.len());
- assert_eq!(p2, entries.first().unwrap().path());
+ assert_eq!(p2, entries[0].path());
let content = store.read(p2).await?;
assert_eq!("Hello, object2!", String::from_utf8(content.to_vec())?);
store.delete(p2).await?;
- let entries = store.list("/").await?;
+ let entries = store
+ .list("/")
+ .await?
+ .into_iter()
+ .filter(|x| x.metadata().mode() == EntryMode::FILE)
+ .collect::<Vec<_>>();
assert!(entries.is_empty());
assert!(store.read(p1).await.is_err());
@@ -252,7 +267,7 @@ async fn test_file_backend_with_lru_cache() -> Result<()> {
async fn assert_lru_cache<C: Access>(cache_layer: &LruCacheLayer<C>, file_names: &[&str]) {
for file_name in file_names {
- assert!(cache_layer.contains_file(file_name).await);
+ assert!(cache_layer.contains_file(file_name).await, "{file_name}");
}
}
@@ -264,7 +279,9 @@ async fn assert_cache_files<C: Access>(
let (_, mut lister) = store.list("/", OpList::default()).await?;
let mut objects = vec![];
while let Some(e) = lister.next().await? {
- objects.push(e);
+ if e.mode() == EntryMode::FILE {
+ objects.push(e);
+ }
}
// compare the cache file with the expected cache file; ignore orders
@@ -332,9 +349,9 @@ async fn test_object_store_cache_policy() -> Result<()> {
assert_cache_files(
&cache_store,
&[
- "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-14",
- "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=7-14",
- "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=0-14",
+ "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-",
+ "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=7-",
+ "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=0-",
],
&["Hello, object1!", "object2!", "Hello, object2!"],
)
@@ -342,9 +359,9 @@ async fn test_object_store_cache_policy() -> Result<()> {
assert_lru_cache(
&cache_layer,
&[
- "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-14",
- "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=7-14",
- "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=0-14",
+ "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-",
+ "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=7-",
+ "ecfe0dce85de452eb0a325158e7bfb75.cache-bytes=0-",
],
)
.await;
@@ -355,13 +372,13 @@ async fn test_object_store_cache_policy() -> Result<()> {
assert_eq!(cache_layer.read_cache_stat().await, (1, 15));
assert_cache_files(
&cache_store,
- &["6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-14"],
+ &["6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-"],
&["Hello, object1!"],
)
.await?;
assert_lru_cache(
&cache_layer,
- &["6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-14"],
+ &["6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-"],
)
.await;
@@ -388,8 +405,8 @@ async fn test_object_store_cache_policy() -> Result<()> {
assert_cache_files(
&cache_store,
&[
- "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-14",
- "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-14",
+ "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-",
+ "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-",
"a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-4",
],
&["Hello, object1!", "Hello, object3!", "Hello"],
@@ -398,8 +415,8 @@ async fn test_object_store_cache_policy() -> Result<()> {
assert_lru_cache(
&cache_layer,
&[
- "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-14",
- "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-14",
+ "6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=0-",
+ "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-",
"a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-4",
],
)
@@ -416,7 +433,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
&cache_store,
&[
"6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=1-14",
- "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-14",
+ "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-",
"a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-4",
],
&["ello, object1!", "Hello, object3!", "Hello"],
@@ -426,7 +443,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
&cache_layer,
&[
"6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=1-14",
- "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-14",
+ "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-",
"a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-4",
],
)
@@ -448,7 +465,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
&cache_layer,
&[
"6d29752bdc6e4d5ba5483b96615d6c48.cache-bytes=1-14",
- "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-14",
+ "a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-",
"a8b1dc21e24bb55974e3e68acc77ed52.cache-bytes=0-4",
],
)
| feat | bump opendal and switch prometheus layer to the upstream impl (#5179) |
| f34a99ff5a03a41728d93faa9b5dc3892edc9f1b | 2022-11-08 08:39:46 | LFC | feat: use regex to filter out not supported MySQL stmt (#396) | false |
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2dbc9c6e6199..83a064a734d7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,7 +9,7 @@ repos:
rev: e6a795bc6b2c0958f9ef52af4863bbd7cc17238f
hooks:
- id: cargo-sort
- args: ["--workspace"]
+ args: ["--workspace", "--print"]
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
diff --git a/Cargo.lock b/Cargo.lock
index f21f03eafd88..beec8ed491a7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3350,9 +3350,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.15.0"
+version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
+checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
[[package]]
name = "oorandom"
@@ -5030,12 +5030,14 @@ dependencies = [
"metrics",
"mysql_async",
"num_cpus",
+ "once_cell",
"openmetrics-parser",
"opensrv-mysql",
"pgwire",
"prost 0.11.0",
"query",
"rand 0.8.5",
+ "regex",
"schemars",
"script",
"serde",
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index 5b93b7a5df6c..f641365b83c0 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -4,9 +4,12 @@ mod recordbatch;
pub mod util;
use std::pin::Pin;
+use std::sync::Arc;
+use datafusion::arrow_print;
pub use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datatypes::schema::SchemaRef;
+use datatypes::prelude::VectorRef;
+use datatypes::schema::{Schema, SchemaRef};
use error::Result;
use futures::task::{Context, Poll};
use futures::Stream;
@@ -54,6 +57,35 @@ pub struct RecordBatches {
}
impl RecordBatches {
+ pub fn try_from_columns<I: IntoIterator<Item = VectorRef>>(
+ schema: SchemaRef,
+ columns: I,
+ ) -> Result<Self> {
+ let batches = vec![RecordBatch::new(schema.clone(), columns)?];
+ Ok(Self { schema, batches })
+ }
+
+ #[inline]
+ pub fn empty() -> Self {
+ Self {
+ schema: Arc::new(Schema::new(vec![])),
+ batches: vec![],
+ }
+ }
+
+ pub fn iter(&self) -> impl Iterator<Item = &RecordBatch> {
+ self.batches.iter()
+ }
+
+ pub fn pretty_print(&self) -> String {
+ arrow_print::write(
+ &self
+ .iter()
+ .map(|x| x.df_recordbatch.clone())
+ .collect::<Vec<_>>(),
+ )
+ }
+
pub fn try_new(schema: SchemaRef, batches: Vec<RecordBatch>) -> Result<Self> {
for batch in batches.iter() {
ensure!(
@@ -124,7 +156,26 @@ mod tests {
use super::*;
#[test]
- fn test_recordbatches() {
+ fn test_recordbatches_try_from_columns() {
+ let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
+ "a",
+ ConcreteDataType::int32_datatype(),
+ false,
+ )]));
+ let result = RecordBatches::try_from_columns(
+ schema.clone(),
+ vec![Arc::new(StringVector::from(vec!["hello", "world"])) as _],
+ );
+ assert!(result.is_err());
+
+ let v: VectorRef = Arc::new(Int32Vector::from_slice(&[1, 2]));
+ let expected = vec![RecordBatch::new(schema.clone(), vec![v.clone()]).unwrap()];
+ let r = RecordBatches::try_from_columns(schema, vec![v]).unwrap();
+ assert_eq!(r.take(), expected);
+ }
+
+ #[test]
+ fn test_recordbatches_try_new() {
let column_a = ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false);
let column_b = ColumnSchema::new("b", ConcreteDataType::string_datatype(), false);
let column_c = ColumnSchema::new("c", ConcreteDataType::boolean_datatype(), false);
@@ -150,6 +201,15 @@ mod tests {
);
let batches = RecordBatches::try_new(schema1.clone(), vec![batch1.clone()]).unwrap();
+ let expected = "\
++---+-------+
+| a | b |
++---+-------+
+| 1 | hello |
+| 2 | world |
++---+-------+";
+ assert_eq!(batches.pretty_print(), expected);
+
assert_eq!(schema1, batches.schema());
assert_eq!(vec![batch1], batches.take());
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index ffc420d13935..706af59c2cae 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -285,8 +285,6 @@ mod tests {
admin_expr, admin_result, column, column::SemanticType, object_expr, object_result,
select_expr, Column, ExprHeader, MutateResult, SelectExpr,
};
- use datafusion::arrow_print;
- use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
use datatypes::schema::ColumnDefaultConstraint;
use datatypes::value::Value;
@@ -327,12 +325,7 @@ mod tests {
let output = SqlQueryHandler::do_query(&*instance, sql).await.unwrap();
match output {
Output::RecordBatches(recordbatches) => {
- let recordbatches = recordbatches
- .take()
- .into_iter()
- .map(|r| r.df_recordbatch)
- .collect::<Vec<DfRecordBatch>>();
- let pretty_print = arrow_print::write(&recordbatches);
+ let pretty_print = recordbatches.pretty_print();
let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
let expected = vec![
"+----------------+---------------------+-----+--------+-----------+",
@@ -352,12 +345,7 @@ mod tests {
let output = SqlQueryHandler::do_query(&*instance, sql).await.unwrap();
match output {
Output::RecordBatches(recordbatches) => {
- let recordbatches = recordbatches
- .take()
- .into_iter()
- .map(|r| r.df_recordbatch)
- .collect::<Vec<DfRecordBatch>>();
- let pretty_print = arrow_print::write(&recordbatches);
+ let pretty_print = recordbatches.pretty_print();
let pretty_print = pretty_print.lines().collect::<Vec<&str>>();
let expected = vec![
"+----------------+---------------------+-----+--------+-----------+",
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index a83bf0744cb7..d5b3582868f1 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -26,10 +26,12 @@ hyper = { version = "0.14", features = ["full"] }
influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", branch = "feat/line-protocol" }
metrics = "0.20"
num_cpus = "1.13"
+once_cell = "1.16"
openmetrics-parser = "0.4"
opensrv-mysql = "0.1"
pgwire = { version = "0.4" }
prost = "0.11"
+regex = "1.6"
schemars = "0.8"
serde = "1.0"
serde_json = "1.0"
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
new file mode 100644
index 000000000000..0ae9fc2ba5c5
--- /dev/null
+++ b/src/servers/src/mysql/federated.rs
@@ -0,0 +1,374 @@
+//! Use regex to filter out some MySQL federated components' emitted statements.
+//! Inspired by Databend's "[mysql_federated.rs](https://github.com/datafuselabs/databend/blob/ac706bf65845e6895141c96c0a10bad6fdc2d367/src/query/service/src/servers/mysql/mysql_federated.rs)".
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use common_query::Output;
+use common_recordbatch::RecordBatches;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::schema::{ColumnSchema, Schema};
+use datatypes::vectors::StringVector;
+use once_cell::sync::Lazy;
+use regex::bytes::RegexSet;
+use regex::Regex;
+
+// TODO(LFC): Include GreptimeDB's version and git commit tag etc.
+const MYSQL_VERSION: &str = "8.0.26";
+
+static SELECT_VAR_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^(SELECT @@(.*))").unwrap());
+static MYSQL_CONN_JAVA_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(/\\* mysql-connector-java(.*))").unwrap());
+static SHOW_LOWER_CASE_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'lower_case_table_names'(.*))").unwrap());
+static SHOW_COLLATION_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(show collation where(.*))").unwrap());
+static SHOW_VARIABLES_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES(.*))").unwrap());
+static SELECT_VERSION_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"(?i)^(SELECT VERSION\(\s*\))").unwrap());
+
+// SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP());
+static SELECT_TIME_DIFF_FUNC_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(SELECT TIMEDIFF\\(NOW\\(\\), UTC_TIMESTAMP\\(\\)\\))").unwrap());
+
+// sqlalchemy < 1.4.30
+static SHOW_SQL_MODE_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'sql_mode'(.*))").unwrap());
+
+static OTHER_NOT_SUPPORTED_STMT: Lazy<RegexSet> = Lazy::new(|| {
+ RegexSet::new(&[
+ // Txn.
+ "(?i)^(ROLLBACK(.*))",
+ "(?i)^(COMMIT(.*))",
+ "(?i)^(START(.*))",
+
+ // Set.
+ "(?i)^(SET NAMES(.*))",
+ "(?i)^(SET character_set_results(.*))",
+ "(?i)^(SET net_write_timeout(.*))",
+ "(?i)^(SET FOREIGN_KEY_CHECKS(.*))",
+ "(?i)^(SET AUTOCOMMIT(.*))",
+ "(?i)^(SET SQL_LOG_BIN(.*))",
+ "(?i)^(SET sql_mode(.*))",
+ "(?i)^(SET SQL_SELECT_LIMIT(.*))",
+ "(?i)^(SET @@(.*))",
+
+ "(?i)^(SHOW COLLATION)",
+ "(?i)^(SHOW CHARSET)",
+
+ // mysqldump.
+ "(?i)^(SET SESSION(.*))",
+ "(?i)^(SET SQL_QUOTE_SHOW_CREATE(.*))",
+ "(?i)^(LOCK TABLES(.*))",
+ "(?i)^(UNLOCK TABLES(.*))",
+ "(?i)^(SELECT LOGFILE_GROUP_NAME, FILE_NAME, TOTAL_EXTENTS, INITIAL_SIZE, ENGINE, EXTRA FROM INFORMATION_SCHEMA.FILES(.*))",
+
+ // mydumper.
+ "(?i)^(/\\*!80003 SET(.*) \\*/)$",
+ "(?i)^(SHOW MASTER STATUS)",
+ "(?i)^(SHOW ALL SLAVES STATUS)",
+ "(?i)^(LOCK BINLOG FOR BACKUP)",
+ "(?i)^(LOCK TABLES FOR BACKUP)",
+ "(?i)^(UNLOCK BINLOG(.*))",
+ "(?i)^(/\\*!40101 SET(.*) \\*/)$",
+
+ // DBeaver.
+ "(?i)^(SHOW WARNINGS)",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW WARNINGS)",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW PLUGINS)",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW COLLATION)",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW CHARSET)",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW ENGINES)",
+ "(?i)^(/\\* ApplicationName=(.*)SELECT @@(.*))",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW @@(.*))",
+ "(?i)^(/\\* ApplicationName=(.*)SET net_write_timeout(.*))",
+ "(?i)^(/\\* ApplicationName=(.*)SET SQL_SELECT_LIMIT(.*))",
+ "(?i)^(/\\* ApplicationName=(.*)SHOW VARIABLES(.*))",
+
+ // pt-toolkit
+ "(?i)^(/\\*!40101 SET(.*) \\*/)$",
+
+ // mysqldump 5.7.16
+ "(?i)^(/\\*!40100 SET(.*) \\*/)$",
+ "(?i)^(/\\*!40103 SET(.*) \\*/)$",
+ "(?i)^(/\\*!40111 SET(.*) \\*/)$",
+ "(?i)^(/\\*!40101 SET(.*) \\*/)$",
+ "(?i)^(/\\*!40014 SET(.*) \\*/)$",
+ "(?i)^(/\\*!40000 SET(.*) \\*/)$",
+ ]).unwrap()
+});
+
+static VAR_VALUES: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
+ HashMap::from([
+ ("tx_isolation", "REPEATABLE-READ"),
+ ("session.tx_isolation", "REPEATABLE-READ"),
+ ("transaction_isolation", "REPEATABLE-READ"),
+ ("session.transaction_isolation", "REPEATABLE-READ"),
+ ("session.transaction_read_only", "0"),
+ ("time_zone", "UTC"),
+ ("system_time_zone", "UTC"),
+ ("max_allowed_packet", "134217728"),
+ ("interactive_timeout", "31536000"),
+ ("wait_timeout", "31536000"),
+ ("net_write_timeout", "31536000"),
+ ("version_comment", "Greptime"),
+ ])
+});
+
+// Recordbatches for select function.
+// Format:
+// |function_name|
+// |value|
+fn select_function(name: &str, value: &str) -> RecordBatches {
+ let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
+ name,
+ ConcreteDataType::string_datatype(),
+ true,
+ )]));
+ let columns = vec![Arc::new(StringVector::from(vec![value])) as _];
+ RecordBatches::try_from_columns(schema, columns)
+ // unwrap is safe because the schema and data are definitely able to form a recordbatch, they are all string type
+ .unwrap()
+}
+
+// Recordbatches for show variable statement.
+// Format is:
+// | Variable_name | Value |
+// | xx | yy |
+fn show_variables(name: &str, value: &str) -> RecordBatches {
+ let schema = Arc::new(Schema::new(vec![
+ ColumnSchema::new("Variable_name", ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new("Value", ConcreteDataType::string_datatype(), true),
+ ]));
+ let columns = vec![
+ Arc::new(StringVector::from(vec![name])) as _,
+ Arc::new(StringVector::from(vec![value])) as _,
+ ];
+ RecordBatches::try_from_columns(schema, columns)
+ // unwrap is safe because the schema and data are definitely able to form a recordbatch, they are all string type
+ .unwrap()
+}
+
+fn select_variable(query: &str) -> Option<Output> {
+ let mut fields = vec![];
+ let mut values = vec![];
+
+ // query like "SELECT @@aa, @@bb as cc, @dd..."
+ let query = query.to_lowercase();
+ let vars: Vec<&str> = query.split("@@").collect();
+ if vars.len() <= 1 {
+ return None;
+ }
+
+ // skip the first "select"
+ for var in vars.iter().skip(1) {
+ let var = var.trim_matches(|c| c == ' ' || c == ',');
+ let var_as: Vec<&str> = var
+ .split(" as ")
+ .map(|x| {
+ x.trim_matches(|c| c == ' ')
+ .split_whitespace()
+ .next()
+ .unwrap_or("")
+ })
+ .collect();
+ match var_as.len() {
+ 1 => {
+ // @@aa
+ let value = VAR_VALUES.get(var_as[0]).unwrap_or(&"0");
+ values.push(Arc::new(StringVector::from(vec![*value])) as _);
+
+ // field is '@@aa'
+ fields.push(ColumnSchema::new(
+ &format!("@@{}", var_as[0]),
+ ConcreteDataType::string_datatype(),
+ true,
+ ));
+ }
+ 2 => {
+ // @@bb as cc:
+ // var is 'bb'.
+ let value = VAR_VALUES.get(var_as[0]).unwrap_or(&"0");
+ values.push(Arc::new(StringVector::from(vec![*value])) as _);
+
+ // field is 'cc'.
+ fields.push(ColumnSchema::new(
+ var_as[1],
+ ConcreteDataType::string_datatype(),
+ true,
+ ));
+ }
+ _ => return None,
+ }
+ }
+
+ let schema = Arc::new(Schema::new(fields));
+ // unwrap is safe because the schema and data are definitely able to form a recordbatch, they are all string type
+ let batches = RecordBatches::try_from_columns(schema, values).unwrap();
+ Some(Output::RecordBatches(batches))
+}
+
+fn check_select_variable(query: &str) -> Option<Output> {
+ if vec![&SELECT_VAR_PATTERN, &MYSQL_CONN_JAVA_PATTERN]
+ .iter()
+ .any(|r| r.is_match(query))
+ {
+ select_variable(query)
+ } else {
+ None
+ }
+}
+
+fn check_show_variables(query: &str) -> Option<Output> {
+ let recordbatches = if SHOW_SQL_MODE_PATTERN.is_match(query) {
+ Some(show_variables("sql_mode", "ONLY_FULL_GROUP_BY STRICT_TRANS_TABLES NO_ZERO_IN_DATE NO_ZERO_DATE ERROR_FOR_DIVISION_BY_ZERO NO_ENGINE_SUBSTITUTION"))
+ } else if SHOW_LOWER_CASE_PATTERN.is_match(query) {
+ Some(show_variables("lower_case_table_names", "0"))
+ } else if SHOW_COLLATION_PATTERN.is_match(query) || SHOW_VARIABLES_PATTERN.is_match(query) {
+ Some(show_variables("", ""))
+ } else {
+ None
+ };
+ recordbatches.map(Output::RecordBatches)
+}
+
+// Check for SET or others query, this is the final check of the federated query.
+fn check_others(query: &str) -> Option<Output> {
+ if OTHER_NOT_SUPPORTED_STMT.is_match(query.as_bytes()) {
+ return Some(Output::RecordBatches(RecordBatches::empty()));
+ }
+
+ let recordbatches = if SELECT_VERSION_PATTERN.is_match(query) {
+ Some(select_function("version()", MYSQL_VERSION))
+ } else if SELECT_TIME_DIFF_FUNC_PATTERN.is_match(query) {
+ Some(select_function(
+ "TIMEDIFF(NOW(), UTC_TIMESTAMP())",
+ "00:00:00",
+ ))
+ } else {
+ None
+ };
+ recordbatches.map(Output::RecordBatches)
+}
+
+// Check whether the query is a federated or driver setup command,
+// and return some faked results if there are any.
+pub fn check(query: &str) -> Option<Output> {
+ // First to check the query is like "select @@variables".
+ let output = check_select_variable(query);
+ if output.is_some() {
+ return output;
+ }
+
+ // Then to check "show variables like ...".
+ let output = check_show_variables(query);
+ if output.is_some() {
+ return output;
+ }
+
+ // Last check.
+ check_others(query)
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_check() {
+ let query = "select 1";
+ let result = check(query);
+ assert!(result.is_none());
+
+ let query = "select versiona";
+ let output = check(query);
+ assert!(output.is_none());
+
+ fn test(query: &str, expected: Vec<&str>) {
+ let output = check(query);
+ match output.unwrap() {
+ Output::RecordBatches(r) => {
+ assert_eq!(r.pretty_print().lines().collect::<Vec<_>>(), expected)
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ let query = "select version()";
+ let expected = vec![
+ "+-----------+",
+ "| version() |",
+ "+-----------+",
+ "| 8.0.26 |",
+ "+-----------+",
+ ];
+ test(query, expected);
+
+ let query = "SELECT @@version_comment LIMIT 1";
+ let expected = vec![
+ "+-------------------+",
+ "| @@version_comment |",
+ "+-------------------+",
+ "| Greptime |",
+ "+-------------------+",
+ ];
+ test(query, expected);
+
+ // variables
+ let query = "select @@tx_isolation, @@session.tx_isolation";
+ let expected = vec![
+ "+-----------------+------------------------+",
+ "| @@tx_isolation | @@session.tx_isolation |",
+ "+-----------------+------------------------+",
+ "| REPEATABLE-READ | REPEATABLE-READ |",
+ "+-----------------+------------------------+",
+ ];
+ test(query, expected);
+
+ // complex variables
+ let query = "/* mysql-connector-java-8.0.17 (Revision: 16a712ddb3f826a1933ab42b0039f7fb9eebc6ec) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@collation_server AS collation_server, @@collation_connection AS collation_connection, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_write_timeout AS net_write_timeout, @@performance_schema AS performance_schema, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@transaction_isolation AS transaction_isolation, @@wait_timeout AS wait_timeout;";
+ let expected = vec![
+ "+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+",
+ "| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; |",
+ "+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+",
+ "| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | UTC | UTC | REPEATABLE-READ | 31536000 |",
+ "+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+",
+ ];
+ test(query, expected);
+
+ let query = "show variables";
+ let expected = vec![
+ "+---------------+-------+",
+ "| Variable_name | Value |",
+ "+---------------+-------+",
+ "| | |",
+ "+---------------+-------+",
+ ];
+ test(query, expected);
+
+ let query = "show variables like 'lower_case_table_names'";
+ let expected = vec![
+ "+------------------------+-------+",
+ "| Variable_name | Value |",
+ "+------------------------+-------+",
+ "| lower_case_table_names | 0 |",
+ "+------------------------+-------+",
+ ];
+ test(query, expected);
+
+ let query = "show collation";
+ let expected = vec!["++", "++"]; // empty
+ test(query, expected);
+
+ let query = "SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP())";
+ let expected = vec![
+ "+----------------------------------+",
+ "| TIMEDIFF(NOW(), UTC_TIMESTAMP()) |",
+ "+----------------------------------+",
+ "| 00:00:00 |",
+ "+----------------------------------+",
+ ];
+ test(query, expected);
+ }
+}
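
The module above answers well-known driver and federated probes from canned record batches so those statements never reach the query engine; the handler change below simply consults it before dispatching. A minimal, self-contained sketch of the same interception idea (this is not the actual module API, and the patterns are only a tiny subset of the ones listed above):

```rust
// Sketch only: intercept known client probes with a RegexSet, pass the rest on.
use regex::RegexSet;

fn canned_answer(query: &str) -> Option<&'static str> {
    let probes = RegexSet::new([
        r"(?i)^(SELECT VERSION\(\s*\))",
        r"(?i)^(SET NAMES(.*))",
        r"(?i)^(SHOW WARNINGS)",
    ])
    .expect("valid patterns");
    if probes.is_match(query) {
        // The real code builds faked RecordBatches here; a string stands in.
        Some("ok")
    } else {
        None
    }
}

fn main() {
    assert!(canned_answer("select version()").is_some());
    assert!(canned_answer("SET NAMES utf8mb4").is_some());
    assert!(canned_answer("SELECT ts, cpu FROM metrics").is_none());
}
```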
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 9c784df7b5d5..74083de14edb 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -63,7 +63,14 @@ impl<W: io::Write + Send + Sync> AsyncMysqlShim<W> for MysqlInstanceShim {
query: &'a str,
writer: QueryResultWriter<'a, W>,
) -> Result<()> {
- let output = self.query_handler.do_query(query).await;
+ // TODO(LFC): Find a better way:
+ // `check` uses regex to filter out unsupported statements emitted by MySQL's federated
+ // components, this is quick and dirty, there must be a better way to do it.
+ let output = if let Some(output) = crate::mysql::federated::check(query) {
+ Ok(output)
+ } else {
+ self.query_handler.do_query(query).await
+ };
let mut writer = MysqlResultWriter::new(writer);
writer.write(output).await
diff --git a/src/servers/src/mysql/mod.rs b/src/servers/src/mysql/mod.rs
index 2c0f9eef38be..d1df54151c20 100644
--- a/src/servers/src/mysql/mod.rs
+++ b/src/servers/src/mysql/mod.rs
@@ -1,3 +1,4 @@
+mod federated;
pub mod handler;
pub mod server;
pub mod writer;
| feat | use regex to filter out not supported MySQL stmt (#396) |
| 7a1b856dfbbe903e3505319b2486fa0e1798f5f4 | 2024-01-16 14:32:09 | Weny Xu | feat: add tests-fuzz crate (#3173) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 82862de58ca0..6a48189586d8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9481,6 +9481,13 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
+[[package]]
+name = "tests-fuzz"
+version = "0.6.0"
+dependencies = [
+ "async-trait",
+]
+
[[package]]
name = "tests-integration"
version = "0.6.0"
diff --git a/Cargo.toml b/Cargo.toml
index 545434313131..2b67086ac49b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -52,6 +52,7 @@ members = [
"src/store-api",
"src/table",
"src/index",
+ "tests-fuzz",
"tests-integration",
"tests/runner",
]
diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml
new file mode 100644
index 000000000000..c522adda88fe
--- /dev/null
+++ b/tests-fuzz/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "tests-fuzz"
+version.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+async-trait = { workspace = true }
diff --git a/tests-fuzz/src/context.rs b/tests-fuzz/src/context.rs
new file mode 100644
index 000000000000..59f3388c4861
--- /dev/null
+++ b/tests-fuzz/src/context.rs
@@ -0,0 +1,13 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
diff --git a/tests-fuzz/src/executor.rs b/tests-fuzz/src/executor.rs
new file mode 100644
index 000000000000..064bd0ce3486
--- /dev/null
+++ b/tests-fuzz/src/executor.rs
@@ -0,0 +1,22 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt;
+
+#[async_trait::async_trait]
+pub(crate) trait DslExecutor<T, U> {
+ type Error: Sync + Send + fmt::Debug;
+
+ async fn execute(&self, input: &T) -> Result<U, Self::Error>;
+}
diff --git a/tests-fuzz/src/generator.rs b/tests-fuzz/src/generator.rs
new file mode 100644
index 000000000000..93e8227ea753
--- /dev/null
+++ b/tests-fuzz/src/generator.rs
@@ -0,0 +1,22 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt;
+
+#[async_trait::async_trait]
+pub(crate) trait Generator<T> {
+ type Error: Sync + Send + fmt::Debug;
+
+ async fn generate(&self) -> Result<T, Self::Error>;
+}
diff --git a/tests-fuzz/src/lib.rs b/tests-fuzz/src/lib.rs
new file mode 100644
index 000000000000..4615527a0a7b
--- /dev/null
+++ b/tests-fuzz/src/lib.rs
@@ -0,0 +1,19 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub(crate) mod context;
+pub(crate) mod executor;
+pub(crate) mod generator;
+pub(crate) mod table_creator;
+pub(crate) mod translator;
diff --git a/tests-fuzz/src/table_creator.rs b/tests-fuzz/src/table_creator.rs
new file mode 100644
index 000000000000..59f3388c4861
--- /dev/null
+++ b/tests-fuzz/src/table_creator.rs
@@ -0,0 +1,13 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
diff --git a/tests-fuzz/src/translator.rs b/tests-fuzz/src/translator.rs
new file mode 100644
index 000000000000..11e18a87dbbd
--- /dev/null
+++ b/tests-fuzz/src/translator.rs
@@ -0,0 +1,21 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt;
+
+pub(crate) trait DslTranslator<T, U> {
+ type Error: Sync + Send + fmt::Debug;
+
+ fn translate(&self, input: &T) -> Result<U, Self::Error>;
+}
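The `Generator`, `DslTranslator`, and `DslExecutor` traits added above form a generate → translate → execute pipeline for fuzz inputs. Below is a minimal sketch, not taken from this commit, of how the synchronous `DslTranslator` trait could be implemented; `CreateTableIr`, its fields, and `CreateTableSqlTranslator` are hypothetical names used only for illustration.

```rust
use std::fmt;

// Same shape as the trait introduced in tests-fuzz/src/translator.rs.
pub(crate) trait DslTranslator<T, U> {
    type Error: Sync + Send + fmt::Debug;

    fn translate(&self, input: &T) -> Result<U, Self::Error>;
}

// Hypothetical intermediate representation that a `Generator` could produce.
struct CreateTableIr {
    table_name: String,
    columns: Vec<(String, String)>, // (column name, column type)
}

struct CreateTableSqlTranslator;

impl DslTranslator<CreateTableIr, String> for CreateTableSqlTranslator {
    type Error = String;

    fn translate(&self, input: &CreateTableIr) -> Result<String, Self::Error> {
        if input.columns.is_empty() {
            return Err("a table needs at least one column".to_string());
        }
        let columns = input
            .columns
            .iter()
            .map(|(name, ty)| format!("{name} {ty}"))
            .collect::<Vec<_>>()
            .join(", ");
        Ok(format!("CREATE TABLE {} ({})", input.table_name, columns))
    }
}

fn main() {
    let ir = CreateTableIr {
        table_name: "metrics".to_string(),
        columns: vec![
            ("ts".to_string(), "TIMESTAMP".to_string()),
            ("value".to_string(), "DOUBLE".to_string()),
        ],
    };
    // Prints: CREATE TABLE metrics (ts TIMESTAMP, value DOUBLE)
    println!("{}", CreateTableSqlTranslator.translate(&ir).unwrap());
}
```

In a full fuzz loop, the async `Generator` would produce such an IR and a `DslExecutor` would run the translated statement against a database; those two steps are omitted here.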
| feat | add tests-fuzz crate (#3173) |
| 718246ea1a277c3bd7bb588620f2d677621b5998 | 2023-09-12 18:27:15 | Ruihang Xia | feat: implement heartbeat for region server (#2279) | false |
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index da561c4175a9..504676423a0e 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -74,33 +74,33 @@ jobs:
- name: Run taplo
run: taplo format --check
- sqlness:
- name: Sqlness Test
- if: github.event.pull_request.draft == false
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
- timeout-minutes: 60
- steps:
- - uses: actions/checkout@v3
- - uses: arduino/setup-protoc@v1
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- - uses: dtolnay/rust-toolchain@master
- with:
- toolchain: ${{ env.RUST_TOOLCHAIN }}
- - name: Rust Cache
- uses: Swatinem/rust-cache@v2
- - name: Run sqlness
- run: cargo sqlness
- - name: Upload sqlness logs
- if: always()
- uses: actions/upload-artifact@v3
- with:
- name: sqlness-logs
- path: ${{ runner.temp }}/greptime-*.log
- retention-days: 3
+ # sqlness:
+ # name: Sqlness Test
+ # if: github.event.pull_request.draft == false
+ # runs-on: ${{ matrix.os }}
+ # strategy:
+ # matrix:
+ # os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
+ # timeout-minutes: 60
+ # steps:
+ # - uses: actions/checkout@v3
+ # - uses: arduino/setup-protoc@v1
+ # with:
+ # repo-token: ${{ secrets.GITHUB_TOKEN }}
+ # - uses: dtolnay/rust-toolchain@master
+ # with:
+ # toolchain: ${{ env.RUST_TOOLCHAIN }}
+ # - name: Rust Cache
+ # uses: Swatinem/rust-cache@v2
+ # - name: Run sqlness
+ # run: cargo sqlness
+ # - name: Upload sqlness logs
+ # if: always()
+ # uses: actions/upload-artifact@v3
+ # with:
+ # name: sqlness-logs
+ # path: ${{ runner.temp }}/greptime-*.log
+ # retention-days: 3
fmt:
name: Rustfmt
diff --git a/Cargo.lock b/Cargo.lock
index fc681d786742..8230e5ec814f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -640,15 +640,6 @@ dependencies = [
"syn 2.0.29",
]
-[[package]]
-name = "atoi"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e"
-dependencies = [
- "num-traits",
-]
-
[[package]]
name = "atomic"
version = "0.5.3"
@@ -1067,9 +1058,9 @@ dependencies = [
[[package]]
name = "bstr"
-version = "1.6.1"
+version = "1.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8042c26c77e5bd6897a7358e0abb3ec412ed126d826988135653fc669263899d"
+checksum = "4c2f7349907b712260e64b0afe2f84692af14a454be26187d9df565c7f69266a"
dependencies = [
"memchr",
"regex-automata 0.3.7",
@@ -1258,7 +1249,7 @@ dependencies = [
"mito",
"moka 0.11.3",
"object-store",
- "parking_lot 0.12.1",
+ "parking_lot",
"regex",
"serde",
"serde_json",
@@ -1317,9 +1308,9 @@ dependencies = [
[[package]]
name = "chrono"
-version = "0.4.27"
+version = "0.4.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f56b4c72906975ca04becb8a30e102dfecddd0c06181e3e95ddc444be28881f8"
+checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f"
dependencies = [
"android-tzdata",
"iana-time-zone",
@@ -1533,7 +1524,7 @@ dependencies = [
"enum_dispatch",
"futures-util",
"moka 0.9.9",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost",
"rand",
"snafu",
@@ -1945,7 +1936,7 @@ dependencies = [
"once_cell",
"opentelemetry 0.17.0",
"opentelemetry-jaeger",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand",
"rs-snowflake",
"serde",
@@ -2066,12 +2057,6 @@ dependencies = [
"tracing-subscriber",
]
-[[package]]
-name = "const-oid"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3"
-
[[package]]
name = "const-oid"
version = "0.9.5"
@@ -2140,21 +2125,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "crc"
-version = "3.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe"
-dependencies = [
- "crc-catalog",
-]
-
-[[package]]
-name = "crc-catalog"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484"
-
[[package]]
name = "crc32fast"
version = "1.3.2"
@@ -2311,16 +2281,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
-[[package]]
-name = "crypto-bigint"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21"
-dependencies = [
- "generic-array",
- "subtle",
-]
-
[[package]]
name = "crypto-common"
version = "0.1.6"
@@ -2432,7 +2392,7 @@ dependencies = [
"hashbrown 0.14.0",
"lock_api",
"once_cell",
- "parking_lot_core 0.9.8",
+ "parking_lot_core",
]
[[package]]
@@ -2467,7 +2427,7 @@ dependencies = [
"log",
"num_cpus",
"object_store",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet",
"percent-encoding",
"pin-project-lite",
@@ -2508,7 +2468,7 @@ dependencies = [
"hashbrown 0.14.0",
"log",
"object_store",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand",
"tempfile",
"url",
@@ -2724,25 +2684,14 @@ dependencies = [
"uuid",
]
-[[package]]
-name = "der"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c"
-dependencies = [
- "const-oid 0.7.1",
- "crypto-bigint",
- "pem-rfc7468 0.3.1",
-]
-
[[package]]
name = "der"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
dependencies = [
- "const-oid 0.9.5",
- "pem-rfc7468 0.7.0",
+ "const-oid",
+ "pem-rfc7468",
"zeroize",
]
@@ -2838,7 +2787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
- "const-oid 0.9.5",
+ "const-oid",
"crypto-common",
"subtle",
]
@@ -2917,18 +2866,6 @@ version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
-[[package]]
-name = "dotenv"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
-
-[[package]]
-name = "dotenvy"
-version = "0.15.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
-
[[package]]
name = "dunce"
version = "1.0.4"
@@ -3453,17 +3390,6 @@ dependencies = [
"futures-util",
]
-[[package]]
-name = "futures-intrusive"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
-dependencies = [
- "futures-core",
- "lock_api",
- "parking_lot 0.11.2",
-]
-
[[package]]
name = "futures-io"
version = "0.3.28"
@@ -3508,12 +3434,6 @@ version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
-[[package]]
-name = "futures-timer"
-version = "3.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
-
[[package]]
name = "futures-util"
version = "0.3.28"
@@ -3653,7 +3573,7 @@ version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc22b0cdc52237667c301dd7cdc6ead8f8f73c9f824e9942c8ebd6b764f6c0bf"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"btoi",
"gix-date",
"itoa",
@@ -3667,7 +3587,7 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2231a25934a240d0a4b6f4478401c73ee81d8be52de0293eedbc172334abf3e1"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-features 0.28.1",
"gix-glob",
"gix-path",
@@ -3700,7 +3620,7 @@ version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f28f654184b5f725c5737c7e4f466cbd8f0102ac352d5257eeab19647ee4256"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
]
[[package]]
@@ -3709,7 +3629,7 @@ version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fbad5ce54a8fc997acc50febd89ec80fa6e97cb7f8d0654cb229936407489d8"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-config-value",
"gix-features 0.28.1",
"gix-glob",
@@ -3732,7 +3652,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d09154c0c8677e4da0ec35e896f56ee3e338e741b9599fae06075edd83a4081c"
dependencies = [
"bitflags 1.3.2",
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-path",
"libc",
"thiserror",
@@ -3744,7 +3664,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "750b684197374518ea057e0a0594713e07683faa0a3f43c0f93d97f64130ad8d"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-command",
"gix-config-value",
"gix-path",
@@ -3760,7 +3680,7 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b96271912ce39822501616f177dea7218784e6c63be90d5f36322ff3a722aae2"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"itoa",
"thiserror",
"time 0.3.28",
@@ -3784,7 +3704,7 @@ version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6eba8ba458cb8f4a6c33409b0fe650b1258655175a7ffd1d24fafd3ed31d880b"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"dunce",
"gix-hash 0.10.4",
"gix-path",
@@ -3836,7 +3756,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93e43efd776bc543f46f0fd0ca3d920c37af71a764a16f2aebd89765e9ff2993"
dependencies = [
"bitflags 1.3.2",
- "bstr 1.6.1",
+ "bstr 1.6.2",
]
[[package]]
@@ -3867,7 +3787,7 @@ checksum = "e4e55e40dfd694884f0eb78796c5bddcf2f8b295dace47039099dd7e76534973"
dependencies = [
"gix-hash 0.10.4",
"hashbrown 0.13.2",
- "parking_lot 0.12.1",
+ "parking_lot",
]
[[package]]
@@ -3877,7 +3797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "717ab601ece7921f59fe86849dbe27d44a46ebb883b5885732c4f30df4996177"
dependencies = [
"bitflags 1.3.2",
- "bstr 1.6.1",
+ "bstr 1.6.2",
"btoi",
"filetime",
"gix-bitmap",
@@ -3909,7 +3829,7 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b66aea5e52875cd4915f4957a6f4b75831a36981e2ec3f5fad9e370e444fe1a"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-actor",
"thiserror",
]
@@ -3920,7 +3840,7 @@ version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8df068db9180ee935fbb70504848369e270bdcb576b05c0faa8b9fd3b86fc017"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"btoi",
"gix-actor",
"gix-features 0.28.1",
@@ -3946,7 +3866,7 @@ dependencies = [
"gix-pack",
"gix-path",
"gix-quote",
- "parking_lot 0.12.1",
+ "parking_lot",
"tempfile",
"thiserror",
]
@@ -3968,7 +3888,7 @@ dependencies = [
"gix-tempfile",
"gix-traverse",
"memmap2",
- "parking_lot 0.12.1",
+ "parking_lot",
"smallvec",
"thiserror",
]
@@ -3979,7 +3899,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32370dce200bb951df013e03dff35b4233fc7a89458642b047629b91734a7e19"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"thiserror",
]
@@ -3992,7 +3912,7 @@ dependencies = [
"gix-command",
"gix-config-value",
"nix 0.26.4",
- "parking_lot 0.12.1",
+ "parking_lot",
"thiserror",
]
@@ -4002,7 +3922,7 @@ version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "475c86a97dd0127ba4465fbb239abac9ea10e68301470c9791a6dd5351cdc905"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"btoi",
"thiserror",
]
@@ -4032,7 +3952,7 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aba332462bda2e8efeae4302b39a6ed01ad56ef772fd5b7ef197cf2798294d65"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-hash 0.10.4",
"gix-revision",
"gix-validate",
@@ -4046,7 +3966,7 @@ version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c6f6ff53f888858afc24bf12628446a14279ceec148df6194481f306f553ad2"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-date",
"gix-hash 0.10.4",
"gix-hashtable",
@@ -4076,7 +3996,7 @@ dependencies = [
"gix-fs",
"libc",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"signal-hook",
"signal-hook-registry",
"tempfile",
@@ -4100,7 +4020,7 @@ version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6a22b4b32ad14d68f7b7fb6458fa58d44b01797d94c1b8f4db2d9c7b3c366b5"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-features 0.28.1",
"gix-path",
"home",
@@ -4123,7 +4043,7 @@ version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba9b3737b2cef3dcd014633485f0034b0f1a931ee54aeb7d8f87f177f3c89040"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"thiserror",
]
@@ -4133,7 +4053,7 @@ version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54ec9a000b4f24af706c3cc680c7cda235656cbe3216336522f5692773b8a301"
dependencies = [
- "bstr 1.6.1",
+ "bstr 1.6.2",
"gix-attributes",
"gix-features 0.28.1",
"gix-glob",
@@ -4229,15 +4149,6 @@ dependencies = [
"allocator-api2",
]
-[[package]]
-name = "hashlink"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
-dependencies = [
- "hashbrown 0.14.0",
-]
-
[[package]]
name = "hdrhistogram"
version = "7.5.2"
@@ -4281,9 +4192,6 @@ name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
-dependencies = [
- "unicode-segmentation",
-]
[[package]]
name = "hermit-abi"
@@ -4312,15 +4220,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df"
-[[package]]
-name = "hkdf"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
-dependencies = [
- "hmac",
-]
-
[[package]]
name = "hmac"
version = "0.12.1"
@@ -4439,9 +4338,9 @@ dependencies = [
"futures-util",
"http",
"hyper",
- "rustls 0.21.7",
+ "rustls",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
]
[[package]]
@@ -5217,9 +5116,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "memchr"
-version = "2.6.1"
+version = "2.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f478948fd84d9f8e86967bf432640e46adfb5a4bd4f14ef7e864ab38220534ae"
+checksum = "5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e"
[[package]]
name = "memcomparable"
@@ -5329,7 +5228,7 @@ dependencies = [
"lazy_static",
"metrics",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost",
"rand",
"regex",
@@ -5360,7 +5259,7 @@ source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=abbd357c1e1
dependencies = [
"anymap",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
]
[[package]]
@@ -5391,7 +5290,7 @@ dependencies = [
"indexmap 1.9.3",
"metrics",
"metrics-util",
- "parking_lot 0.12.1",
+ "parking_lot",
"portable-atomic 0.3.20",
"quanta 0.10.1",
"thiserror",
@@ -5437,7 +5336,7 @@ dependencies = [
"metrics",
"num_cpus",
"ordered-float 2.10.0",
- "parking_lot 0.12.1",
+ "parking_lot",
"portable-atomic 0.3.20",
"quanta 0.10.1",
"radix_trie",
@@ -5590,7 +5489,7 @@ dependencies = [
"futures-util",
"num_cpus",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"quanta 0.11.1",
"rustc_version",
"scheduled-thread-pool",
@@ -5615,7 +5514,7 @@ dependencies = [
"crossbeam-utils",
"futures-util",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"quanta 0.11.1",
"rustc_version",
"scheduled-thread-pool",
@@ -5680,14 +5579,14 @@ dependencies = [
"percent-encoding",
"pin-project",
"priority-queue",
- "rustls 0.21.7",
+ "rustls",
"rustls-pemfile",
"serde",
"serde_json",
"socket2 0.5.3",
"thiserror",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-util",
"twox-hash",
"url",
@@ -6048,7 +5947,7 @@ dependencies = [
"futures",
"humantime",
"itertools 0.10.5",
- "parking_lot 0.12.1",
+ "parking_lot",
"percent-encoding",
"snafu",
"tokio",
@@ -6090,7 +5989,7 @@ dependencies = [
"md-5",
"metrics",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"percent-encoding",
"pin-project",
"quick-xml 0.27.1",
@@ -6128,7 +6027,7 @@ dependencies = [
"nom",
"pin-project-lite",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
]
[[package]]
@@ -6365,17 +6264,6 @@ version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e"
-[[package]]
-name = "parking_lot"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
-dependencies = [
- "instant",
- "lock_api",
- "parking_lot_core 0.8.6",
-]
-
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -6383,21 +6271,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
- "parking_lot_core 0.9.8",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
-dependencies = [
- "cfg-if 1.0.0",
- "instant",
- "libc",
- "redox_syscall 0.2.16",
- "smallvec",
- "winapi",
+ "parking_lot_core",
]
[[package]]
@@ -6520,15 +6394,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "pem-rfc7468"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30"
-dependencies = [
- "base64ct",
-]
-
[[package]]
name = "pem-rfc7468"
version = "0.7.0"
@@ -6546,19 +6411,20 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]]
name = "pest"
-version = "2.7.2"
+version = "2.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a"
+checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33"
dependencies = [
+ "memchr",
"thiserror",
"ucd-trie",
]
[[package]]
name = "pest_derive"
-version = "2.7.2"
+version = "2.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853"
+checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a"
dependencies = [
"pest",
"pest_generator",
@@ -6566,9 +6432,9 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.7.2"
+version = "2.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929"
+checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141"
dependencies = [
"pest",
"pest_meta",
@@ -6579,9 +6445,9 @@ dependencies = [
[[package]]
name = "pest_meta"
-version = "2.7.2"
+version = "2.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48"
+checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f"
dependencies = [
"once_cell",
"pest",
@@ -6621,7 +6487,7 @@ dependencies = [
"thiserror",
"time 0.3.28",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-util",
"x509-certificate",
]
@@ -6706,37 +6572,15 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-[[package]]
-name = "pkcs1"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320"
-dependencies = [
- "der 0.5.1",
- "pkcs8 0.8.0",
- "zeroize",
-]
-
[[package]]
name = "pkcs1"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
dependencies = [
- "der 0.7.8",
- "pkcs8 0.10.2",
- "spki 0.7.2",
-]
-
-[[package]]
-name = "pkcs8"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0"
-dependencies = [
- "der 0.5.1",
- "spki 0.5.4",
- "zeroize",
+ "der",
+ "pkcs8",
+ "spki",
]
[[package]]
@@ -6745,8 +6589,8 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
dependencies = [
- "der 0.7.8",
- "spki 0.7.2",
+ "der",
+ "spki",
]
[[package]]
@@ -6869,7 +6713,7 @@ dependencies = [
"log",
"nix 0.26.4",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost",
"prost-build",
"prost-derive",
@@ -7020,7 +6864,7 @@ dependencies = [
"fnv",
"lazy_static",
"memchr",
- "parking_lot 0.12.1",
+ "parking_lot",
"protobuf",
"thiserror",
]
@@ -7216,7 +7060,7 @@ dependencies = [
"indoc",
"libc",
"memoffset 0.9.0",
- "parking_lot 0.12.1",
+ "parking_lot",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
@@ -7436,7 +7280,7 @@ dependencies = [
"nix 0.26.4",
"num-derive",
"num-traits",
- "parking_lot 0.12.1",
+ "parking_lot",
"prometheus",
"prometheus-static-metric",
"protobuf",
@@ -7652,7 +7496,7 @@ dependencies = [
"quick-xml 0.28.2",
"rand",
"reqwest",
- "rsa 0.9.2",
+ "rsa",
"rust-ini 0.19.0",
"serde",
"serde_json",
@@ -7685,14 +7529,14 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
- "rustls 0.21.7",
+ "rustls",
"rustls-native-certs",
"rustls-pemfile",
"serde",
"serde_json",
"serde_urlencoded",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-util",
"tower-service",
"url",
@@ -7843,26 +7687,6 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e60ef3b82994702bbe4e134d98aadca4b49ed04440148985678d415c68127666"
-[[package]]
-name = "rsa"
-version = "0.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b"
-dependencies = [
- "byteorder",
- "digest",
- "num-bigint-dig",
- "num-integer",
- "num-iter",
- "num-traits",
- "pkcs1 0.3.3",
- "pkcs8 0.8.0",
- "rand_core",
- "smallvec",
- "subtle",
- "zeroize",
-]
-
[[package]]
name = "rsa"
version = "0.9.2"
@@ -7870,59 +7694,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8"
dependencies = [
"byteorder",
- "const-oid 0.9.5",
+ "const-oid",
"digest",
"num-bigint-dig",
"num-integer",
"num-iter",
"num-traits",
- "pkcs1 0.7.5",
- "pkcs8 0.10.2",
+ "pkcs1",
+ "pkcs8",
"rand_core",
"signature",
- "spki 0.7.2",
+ "spki",
"subtle",
"zeroize",
]
-[[package]]
-name = "rstest"
-version = "0.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de1bb486a691878cd320c2f0d319ba91eeaa2e894066d8b5f8f117c000e9d962"
-dependencies = [
- "futures",
- "futures-timer",
- "rstest_macros",
- "rustc_version",
-]
-
-[[package]]
-name = "rstest_macros"
-version = "0.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290ca1a1c8ca7edb7c3283bd44dc35dd54fdec6253a3912e201ba1072018fca8"
-dependencies = [
- "cfg-if 1.0.0",
- "proc-macro2",
- "quote",
- "rustc_version",
- "syn 1.0.109",
- "unicode-ident",
-]
-
-[[package]]
-name = "rstest_reuse"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45f80dcc84beab3a327bbe161f77db25f336a1452428176787c8c79ac79d7073"
-dependencies = [
- "quote",
- "rand",
- "rustc_version",
- "syn 1.0.109",
-]
-
[[package]]
name = "rust-embed"
version = "6.8.1"
@@ -8055,18 +7841,6 @@ dependencies = [
"windows-sys 0.48.0",
]
-[[package]]
-name = "rustls"
-version = "0.20.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99"
-dependencies = [
- "log",
- "ring",
- "sct",
- "webpki",
-]
-
[[package]]
name = "rustls"
version = "0.21.7"
@@ -8225,8 +7999,8 @@ dependencies = [
[[package]]
name = "rustpython-doc"
-version = "0.1.0"
-source = "git+https://github.com/RustPython/__doc__?branch=main#d927debd491e4c45b88e953e6e50e4718e0f2965"
+version = "0.3.0"
+source = "git+https://github.com/RustPython/__doc__?branch=main#8b62ce5d796d68a091969c9fa5406276cb483f79"
dependencies = [
"once_cell",
]
@@ -8302,7 +8076,7 @@ dependencies = [
"num_enum",
"once_cell",
"page_size",
- "parking_lot 0.12.1",
+ "parking_lot",
"paste",
"puruspe",
"rand",
@@ -8368,7 +8142,7 @@ dependencies = [
"num_enum",
"once_cell",
"optional",
- "parking_lot 0.12.1",
+ "parking_lot",
"paste",
"rand",
"result-like",
@@ -8553,7 +8327,7 @@ version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
dependencies = [
- "parking_lot 0.12.1",
+ "parking_lot",
]
[[package]]
@@ -8888,7 +8662,7 @@ dependencies = [
"openmetrics-parser",
"opensrv-mysql",
"opentelemetry-proto",
- "parking_lot 0.12.1",
+ "parking_lot",
"pgwire",
"pin-project",
"postgres-types",
@@ -8899,7 +8673,7 @@ dependencies = [
"rand",
"regex",
"rust-embed",
- "rustls 0.21.7",
+ "rustls",
"rustls-pemfile",
"schemars",
"script",
@@ -8917,7 +8691,7 @@ dependencies = [
"tokio",
"tokio-postgres",
"tokio-postgres-rustls",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-stream",
"tokio-test",
"tonic 0.9.2",
@@ -9192,16 +8966,6 @@ dependencies = [
"lock_api",
]
-[[package]]
-name = "spki"
-version = "0.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27"
-dependencies = [
- "base64ct",
- "der 0.5.1",
-]
-
[[package]]
name = "spki"
version = "0.7.2"
@@ -9209,7 +8973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
dependencies = [
"base64ct",
- "der 0.7.8",
+ "der",
]
[[package]]
@@ -9233,17 +8997,6 @@ dependencies = [
"sqlparser 0.34.0",
]
-[[package]]
-name = "sqlformat"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e"
-dependencies = [
- "itertools 0.10.5",
- "nom",
- "unicode_categories",
-]
-
[[package]]
name = "sqlness"
version = "0.5.0"
@@ -9320,104 +9073,6 @@ dependencies = [
"syn 1.0.109",
]
-[[package]]
-name = "sqlx"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8de3b03a925878ed54a954f621e64bf55a3c1bd29652d0d1a17830405350188"
-dependencies = [
- "sqlx-core",
- "sqlx-macros",
-]
-
-[[package]]
-name = "sqlx-core"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029"
-dependencies = [
- "ahash 0.7.6",
- "atoi",
- "base64 0.13.1",
- "bitflags 1.3.2",
- "byteorder",
- "bytes",
- "chrono",
- "crc",
- "crossbeam-queue",
- "digest",
- "dirs",
- "dotenvy",
- "either",
- "event-listener",
- "futures-channel",
- "futures-core",
- "futures-intrusive",
- "futures-util",
- "generic-array",
- "hashlink",
- "hex",
- "hkdf",
- "hmac",
- "indexmap 1.9.3",
- "itoa",
- "libc",
- "log",
- "md-5",
- "memchr",
- "num-bigint",
- "once_cell",
- "paste",
- "percent-encoding",
- "rand",
- "rsa 0.6.1",
- "rustls 0.20.9",
- "rustls-pemfile",
- "serde",
- "serde_json",
- "sha1",
- "sha2",
- "smallvec",
- "sqlformat",
- "sqlx-rt",
- "stringprep",
- "thiserror",
- "tokio-stream",
- "url",
- "webpki-roots 0.22.6",
- "whoami",
-]
-
-[[package]]
-name = "sqlx-macros"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9"
-dependencies = [
- "dotenvy",
- "either",
- "heck",
- "once_cell",
- "proc-macro2",
- "quote",
- "sha2",
- "sqlx-core",
- "sqlx-rt",
- "syn 1.0.109",
- "url",
-]
-
-[[package]]
-name = "sqlx-rt"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "804d3f245f894e61b1e6263c84b23ca675d96753b5abfd5cc8597d86806e8024"
-dependencies = [
- "once_cell",
- "tokio",
- "tokio-rustls 0.23.4",
-]
-
[[package]]
name = "sre-engine"
version = "0.4.1"
@@ -9574,7 +9229,7 @@ checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b"
dependencies = [
"new_debug_unreachable",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"phf_shared 0.10.0",
"precomputed-hash",
]
@@ -9892,7 +9547,7 @@ version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9547444bfe52cbd79515c6c8087d8ae6ca8d64d2d31a27746320f5cb81d1a15c"
dependencies = [
- "parking_lot 0.12.1",
+ "parking_lot",
]
[[package]]
@@ -9937,69 +9592,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "tests-integration"
-version = "0.4.0-nightly"
-dependencies = [
- "api",
- "async-trait",
- "auth",
- "axum",
- "axum-test-helper",
- "catalog",
- "chrono",
- "client",
- "common-base",
- "common-catalog",
- "common-error",
- "common-grpc",
- "common-meta",
- "common-procedure",
- "common-query",
- "common-recordbatch",
- "common-runtime",
- "common-telemetry",
- "common-test-util",
- "datafusion",
- "datafusion-expr",
- "datanode",
- "datatypes",
- "dotenv",
- "frontend",
- "futures",
- "itertools 0.10.5",
- "meta-client",
- "meta-srv",
- "mito",
- "object-store",
- "once_cell",
- "opentelemetry-proto",
- "partition",
- "paste",
- "prost",
- "query",
- "rand",
- "rstest",
- "rstest_reuse",
- "script",
- "secrecy",
- "serde",
- "serde_json",
- "servers",
- "session",
- "snafu",
- "sql",
- "sqlx",
- "store-api",
- "table",
- "tempfile",
- "tokio",
- "tokio-postgres",
- "tonic 0.9.2",
- "tower",
- "uuid",
-]
-
[[package]]
name = "textwrap"
version = "0.11.0"
@@ -10217,7 +9809,7 @@ dependencies = [
"libc",
"mio",
"num_cpus",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.5.3",
@@ -10260,7 +9852,7 @@ dependencies = [
"futures-channel",
"futures-util",
"log",
- "parking_lot 0.12.1",
+ "parking_lot",
"percent-encoding",
"phf",
"pin-project-lite",
@@ -10281,21 +9873,10 @@ checksum = "dd5831152cb0d3f79ef5523b357319ba154795d64c7078b2daa95a803b54057f"
dependencies = [
"futures",
"ring",
- "rustls 0.21.7",
+ "rustls",
"tokio",
"tokio-postgres",
- "tokio-rustls 0.24.1",
-]
-
-[[package]]
-name = "tokio-rustls"
-version = "0.23.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59"
-dependencies = [
- "rustls 0.20.9",
- "tokio",
- "webpki",
+ "tokio-rustls",
]
[[package]]
@@ -10304,7 +9885,7 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
- "rustls 0.21.7",
+ "rustls",
"tokio",
]
@@ -10445,7 +10026,7 @@ dependencies = [
"prost",
"rustls-pemfile",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-stream",
"tower",
"tower-layer",
@@ -11016,12 +10597,6 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
-[[package]]
-name = "unicode_categories"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
-
[[package]]
name = "unicode_names2"
version = "0.6.0"
@@ -11279,23 +10854,14 @@ dependencies = [
[[package]]
name = "webpki"
-version = "0.22.0"
+version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd"
+checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e"
dependencies = [
"ring",
"untrusted",
]
-[[package]]
-name = "webpki-roots"
-version = "0.22.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87"
-dependencies = [
- "webpki",
-]
-
[[package]]
name = "webpki-roots"
version = "0.23.1"
@@ -11624,12 +11190,12 @@ dependencies = [
"bcder",
"bytes",
"chrono",
- "der 0.7.8",
+ "der",
"hex",
"pem 2.0.1",
"ring",
"signature",
- "spki 0.7.2",
+ "spki",
"thiserror",
]
diff --git a/Cargo.toml b/Cargo.toml
index 66253450fe0d..fcb8f0dd8c9e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -48,7 +48,8 @@ members = [
"src/store-api",
"src/table",
"src/table-procedure",
- "tests-integration",
+ # TODO: add this back once the region server is available
+ # "tests-integration",
"tests/runner",
]
resolver = "2"
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 165b364452bb..a9d7533c5c4b 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -154,10 +154,7 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
// Start datanode instance before starting services, to avoid requests come in before internal components are started.
- self.datanode
- .start_instance()
- .await
- .context(StartDatanodeSnafu)?;
+ self.datanode.start().await.context(StartDatanodeSnafu)?;
info!("Datanode instance started");
self.frontend.start().await.context(StartFrontendSnafu)?;
@@ -171,7 +168,7 @@ impl Instance {
.context(ShutdownFrontendSnafu)?;
self.datanode
- .shutdown_instance()
+ .shutdown()
.await
.context(ShutdownDatanodeSnafu)?;
info!("Datanode instance stopped.");
@@ -293,6 +290,9 @@ impl StartCommand {
})))
}
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ #[allow(clippy::diverging_sub_expression)]
async fn build(self, fe_opts: FrontendOptions, dn_opts: DatanodeOptions) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
@@ -306,7 +306,8 @@ impl StartCommand {
.await
.context(StartDatanodeSnafu)?;
- let mut frontend = build_frontend(plugins.clone(), datanode.get_instance()).await?;
+ // TODO: build frontend instance like in distributed mode
+ let mut frontend = build_frontend(plugins.clone(), todo!()).await?;
frontend
.build_servers(&fe_opts)
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index b066bee50dd5..97a28fe084c2 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -147,6 +147,9 @@ pub enum Error {
#[snafu(display("External error: {}", err_msg))]
External { location: Location, err_msg: String },
+
+ #[snafu(display("Invalid heartbeat response, location: {}", location))]
+ InvalidHeartbeatResponse { location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -164,7 +167,8 @@ impl ErrorExt for Error {
| InvalidTableMetadata { .. }
| MoveRegion { .. }
| Unexpected { .. }
- | External { .. } => StatusCode::Unexpected,
+ | External { .. }
+ | InvalidHeartbeatResponse { .. } => StatusCode::Unexpected,
SendMessage { .. }
| GetKvCache { .. }
diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs
index 7389ebe7509c..fae0412a9c8f 100644
--- a/src/common/runtime/src/runtime.rs
+++ b/src/common/runtime/src/runtime.rs
@@ -52,6 +52,10 @@ impl Drop for Dropper {
}
impl Runtime {
+ pub fn builder() -> Builder {
+ Builder::default()
+ }
+
/// Spawn a future and execute it in this thread pool
///
/// Similar to tokio::runtime::Runtime::spawn()
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index d67ec763e216..fb7ba3df674e 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -17,13 +17,16 @@
use std::sync::Arc;
use std::time::Duration;
+use catalog::local::MemoryCatalogManager;
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
use common_error::ext::BoxedError;
pub use common_procedure::options::ProcedureConfig;
+use common_runtime::Runtime;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
use meta_client::MetaClientOptions;
+use query::QueryEngineFactory;
use secrecy::SecretString;
use serde::{Deserialize, Serialize};
use servers::heartbeat_options::HeartbeatOptions;
@@ -36,9 +39,9 @@ use storage::config::{
};
use storage::scheduler::SchedulerConfig;
-use crate::error::{Result, ShutdownInstanceSnafu};
+use crate::error::{Result, RuntimeResourceSnafu, ShutdownInstanceSnafu};
use crate::heartbeat::HeartbeatTask;
-use crate::instance::{Instance, InstanceRef};
+use crate::region_server::RegionServer;
use crate::server::Services;
pub const DEFAULT_OBJECT_STORE_CACHE_SIZE: ReadableSize = ReadableSize(1024);
@@ -407,38 +410,54 @@ impl DatanodeOptions {
pub struct Datanode {
opts: DatanodeOptions,
services: Option<Services>,
- instance: InstanceRef,
heartbeat_task: Option<HeartbeatTask>,
}
impl Datanode {
pub async fn new(opts: DatanodeOptions, plugins: Arc<Plugins>) -> Result<Datanode> {
- let (instance, heartbeat_task) = Instance::with_opts(&opts, plugins).await?;
+ let query_engine_factory = QueryEngineFactory::new_with_plugins(
+ // query engine in datanode only executes plan with resolved table source.
+ MemoryCatalogManager::with_default_setup(),
+ false,
+ None,
+ None,
+ plugins,
+ );
+ let query_engine = query_engine_factory.query_engine();
+
+ let runtime = Arc::new(
+ Runtime::builder()
+ .worker_threads(opts.rpc_runtime_size)
+ .thread_name("io-handlers")
+ .build()
+ .context(RuntimeResourceSnafu)?,
+ );
+
+ let region_server = RegionServer::new(query_engine, runtime);
+
+ // build optional things with different modes
let services = match opts.mode {
- Mode::Distributed => Some(Services::try_new(instance.clone(), &opts).await?),
+ Mode::Distributed => Some(Services::try_new(region_server.clone(), &opts).await?),
Mode::Standalone => None,
};
+ let heartbeat_task = match opts.mode {
+ Mode::Distributed => Some(HeartbeatTask::try_new(&opts, Some(region_server)).await?),
+ Mode::Standalone => None,
+ };
+
Ok(Self {
opts,
services,
- instance,
heartbeat_task,
})
}
pub async fn start(&mut self) -> Result<()> {
info!("Starting datanode instance...");
- self.start_instance().await?;
- self.start_services().await
- }
-
- /// Start only the internal component of datanode.
- pub async fn start_instance(&mut self) -> Result<()> {
- let _ = self.instance.start().await;
if let Some(task) = &self.heartbeat_task {
task.start().await?;
}
- Ok(())
+ self.start_services().await
}
/// Start services of datanode. This method call will block until services are shutdown.
@@ -450,22 +469,6 @@ impl Datanode {
}
}
- pub fn get_instance(&self) -> InstanceRef {
- self.instance.clone()
- }
-
- pub async fn shutdown_instance(&self) -> Result<()> {
- if let Some(heartbeat_task) = &self.heartbeat_task {
- heartbeat_task
- .close()
- .await
- .map_err(BoxedError::new)
- .context(ShutdownInstanceSnafu)?;
- }
- let _ = self.instance.shutdown().await;
- Ok(())
- }
-
async fn shutdown_services(&self) -> Result<()> {
if let Some(service) = self.services.as_ref() {
service.shutdown().await
@@ -477,7 +480,14 @@ impl Datanode {
pub async fn shutdown(&self) -> Result<()> {
// We must shutdown services first
self.shutdown_services().await?;
- self.shutdown_instance().await
+ if let Some(heartbeat_task) = &self.heartbeat_task {
+ heartbeat_task
+ .close()
+ .await
+ .map_err(BoxedError::new)
+ .context(ShutdownInstanceSnafu)?;
+ }
+ Ok(())
}
}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 39240512b53f..e6434e1e39c1 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -16,22 +16,28 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
-use api::v1::meta::{HeartbeatRequest, NodeStat, Peer};
+use api::v1::meta::{HeartbeatRequest, NodeStat, Peer, RegionStat, TableIdent};
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
-use catalog::{datanode_stat, CatalogManagerRef};
+use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::{
- HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
+ HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::{debug, error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
+use table::engine::manager::MemoryTableEngineManager;
use tokio::sync::mpsc;
use tokio::time::Instant;
+use self::handler::RegionHeartbeatResponseHandler;
use crate::datanode::DatanodeOptions;
-use crate::error::{self, MetaClientInitSnafu, Result};
+use crate::error::{
+ self, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu, Result,
+};
+use crate::instance::new_metasrv_client;
+use crate::region_server::RegionServer;
pub(crate) mod handler;
@@ -42,7 +48,7 @@ pub struct HeartbeatTask {
server_hostname: Option<String>,
running: Arc<AtomicBool>,
meta_client: Arc<MetaClient>,
- catalog_manager: CatalogManagerRef,
+ region_server: RegionServer,
interval: u64,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
region_alive_keepers: Arc<RegionAliveKeepers>,
@@ -56,28 +62,44 @@ impl Drop for HeartbeatTask {
impl HeartbeatTask {
/// Create a new heartbeat task instance.
- pub fn new(
- node_id: u64,
+ pub async fn try_new(
opts: &DatanodeOptions,
- meta_client: Arc<MetaClient>,
- catalog_manager: CatalogManagerRef,
- resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
- heartbeat_interval_millis: u64,
- region_alive_keepers: Arc<RegionAliveKeepers>,
- ) -> Self {
- Self {
- node_id,
+ // TODO: remove optional
+ region_server: Option<RegionServer>,
+ ) -> Result<Self> {
+ let meta_client = new_metasrv_client(
+ opts.node_id.context(MissingNodeIdSnafu)?,
+ opts.meta_client_options
+ .as_ref()
+ .context(MissingMetasrvOptsSnafu)?,
+ )
+ .await?;
+
+ let region_server = region_server.unwrap();
+
+ let region_alive_keepers = Arc::new(RegionAliveKeepers::new(
+ Arc::new(MemoryTableEngineManager::new_empty()),
+ opts.heartbeat.interval_millis,
+ ));
+ let resp_handler_executor = Arc::new(HandlerGroupExecutor::new(vec![
+ Arc::new(ParseMailboxMessageHandler),
+ Arc::new(RegionHeartbeatResponseHandler::new(region_server.clone())),
+ region_alive_keepers.clone(),
+ ]));
+
+ Ok(Self {
+ node_id: opts.node_id.unwrap_or(0),
// We use datanode's start time millis as the node's epoch.
node_epoch: common_time::util::current_time_millis() as u64,
server_addr: opts.rpc_addr.clone(),
server_hostname: opts.rpc_hostname.clone(),
running: Arc::new(AtomicBool::new(false)),
- meta_client,
- catalog_manager,
- interval: heartbeat_interval_millis,
+ meta_client: Arc::new(meta_client),
+ region_server,
+ interval: opts.heartbeat.interval_millis,
resp_handler_executor,
region_alive_keepers,
- }
+ })
}
pub async fn create_streams(
@@ -144,7 +166,7 @@ impl HeartbeatTask {
self.region_alive_keepers.start().await;
let meta_client = self.meta_client.clone();
- let catalog_manager_clone = self.catalog_manager.clone();
+ let region_server_clone = self.region_server.clone();
let handler_executor = self.resp_handler_executor.clone();
@@ -160,12 +182,12 @@ impl HeartbeatTask {
.await?;
let epoch = self.region_alive_keepers.epoch();
- let _handle = common_runtime::spawn_bg(async move {
+ common_runtime::spawn_bg(async move {
let sleep = tokio::time::sleep(Duration::from_millis(0));
tokio::pin!(sleep);
loop {
- if !running.load(Ordering::Acquire) {
+ if !running.load(Ordering::Relaxed) {
info!("shutdown heartbeat task");
break;
}
@@ -194,7 +216,7 @@ impl HeartbeatTask {
}
}
_ = &mut sleep => {
- let (region_num, region_stats) = datanode_stat(&catalog_manager_clone).await;
+                        let (region_num, region_stats) = Self::load_stats(&region_server_clone).await;
let req = HeartbeatRequest {
peer: Some(Peer {
id: node_id,
@@ -241,6 +263,26 @@ impl HeartbeatTask {
Ok(())
}
+ async fn load_stats(region_server: &RegionServer) -> (u64, Vec<RegionStat>) {
+ let region_ids = region_server.opened_region_ids();
+ let region_stats = region_ids
+ .into_iter()
+ .map(|region_id| RegionStat {
+ // TODO: scratch more info
+ region_id: region_id.as_u64(),
+ table_ident: Some(TableIdent {
+ table_id: region_id.table_id(),
+ table_name: None,
+ engine: "MitoEngine".to_string(),
+ }),
+
+ ..Default::default()
+ })
+ .collect::<Vec<_>>();
+
+ (region_stats.len() as _, region_stats)
+ }
+
pub async fn close(&self) -> Result<()> {
let running = self.running.clone();
if running
diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs
index 664f349bcced..8743d6d26a96 100644
--- a/src/datanode/src/heartbeat/handler.rs
+++ b/src/datanode/src/heartbeat/handler.rs
@@ -12,5 +12,136 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+
+use async_trait::async_trait;
+use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult};
+use common_meta::heartbeat::handler::{
+ HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
+};
+use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
+use common_meta::RegionIdent;
+use common_query::Output;
+use common_telemetry::error;
+use snafu::OptionExt;
+use store_api::region_request::{RegionCloseRequest, RegionOpenRequest, RegionRequest};
+use store_api::storage::RegionId;
+
+use crate::error::Result;
+use crate::region_server::RegionServer;
+
pub mod close_region;
pub mod open_region;
+
+/// Handler for [Instruction::OpenRegion] and [Instruction::CloseRegion].
+#[derive(Clone)]
+pub struct RegionHeartbeatResponseHandler {
+ region_server: RegionServer,
+}
+
+impl RegionHeartbeatResponseHandler {
+ pub fn new(region_server: RegionServer) -> Self {
+ Self { region_server }
+ }
+
+ fn instruction_to_request(instruction: Instruction) -> MetaResult<(RegionId, RegionRequest)> {
+ match instruction {
+ Instruction::OpenRegion(region_ident) => {
+                let region_id = Self::region_ident_to_region_id(&region_ident);
+ let open_region_req = RegionRequest::Open(RegionOpenRequest {
+ engine: region_ident.table_ident.engine,
+ region_dir: "".to_string(),
+ options: HashMap::new(),
+ });
+ Ok((region_id, open_region_req))
+ }
+ Instruction::CloseRegion(region_ident) => {
+                let region_id = Self::region_ident_to_region_id(&region_ident);
+ let close_region_req = RegionRequest::Close(RegionCloseRequest {});
+ Ok((region_id, close_region_req))
+ }
+ Instruction::InvalidateTableCache(_) => InvalidHeartbeatResponseSnafu.fail(),
+ }
+ }
+
+ fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
+ RegionId::new(
+ region_ident.table_ident.table_id,
+ region_ident.region_number,
+ )
+ }
+
+ fn reply_template_from_instruction(instruction: &Instruction) -> InstructionReply {
+ match instruction {
+ Instruction::OpenRegion(_) => InstructionReply::OpenRegion(SimpleReply {
+ result: false,
+ error: None,
+ }),
+ Instruction::CloseRegion(_) => InstructionReply::CloseRegion(SimpleReply {
+ result: false,
+ error: None,
+ }),
+ Instruction::InvalidateTableCache(_) => {
+ InstructionReply::InvalidateTableCache(SimpleReply {
+ result: false,
+ error: None,
+ })
+ }
+ }
+ }
+
+ fn fill_reply(mut template: InstructionReply, result: Result<Output>) -> InstructionReply {
+ let success = result.is_ok();
+ let error = result.map_err(|e| e.to_string()).err();
+ match &mut template {
+ InstructionReply::OpenRegion(reply) => {
+ reply.result = success;
+ reply.error = error;
+ }
+ InstructionReply::CloseRegion(reply) => {
+ reply.result = success;
+ reply.error = error;
+ }
+ InstructionReply::InvalidateTableCache(reply) => {
+ reply.result = success;
+ reply.error = error;
+ }
+ }
+
+ template
+ }
+}
+
+#[async_trait]
+impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
+ fn is_acceptable(&self, ctx: &HeartbeatResponseHandlerContext) -> bool {
+ matches!(
+ ctx.incoming_message.as_ref(),
+ Some((_, Instruction::OpenRegion { .. })) | Some((_, Instruction::CloseRegion { .. }))
+ )
+ }
+
+ async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> MetaResult<HandleControl> {
+ let (meta, instruction) = ctx
+ .incoming_message
+ .take()
+ .context(InvalidHeartbeatResponseSnafu)?;
+
+ let mailbox = ctx.mailbox.clone();
+ let region_server = self.region_server.clone();
+ let reply_template = Self::reply_template_from_instruction(&instruction);
+ let (region_id, region_req) = Self::instruction_to_request(instruction)?;
+ let _handle = common_runtime::spawn_bg(async move {
+ let result = region_server.handle_request(region_id, region_req).await;
+
+ if let Err(e) = mailbox
+ .send((meta, Self::fill_reply(reply_template, result)))
+ .await
+ {
+ error!(e; "Failed to send reply to mailbox");
+ }
+ });
+
+ Ok(HandleControl::Done)
+ }
+}
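The handler above follows a template-and-fill pattern: a default reply is built from the instruction kind, the region request is executed, and the outcome is folded into the reply before it is sent back through the mailbox. The following self-contained sketch is not taken from this commit; `ToyInstruction`, `ToyRequest`, and `ToyReply` are toy stand-ins for `Instruction`, `RegionRequest`, and `InstructionReply`.

```rust
#[derive(Debug)]
enum ToyInstruction {
    OpenRegion { region_id: u64 },
    CloseRegion { region_id: u64 },
}

#[derive(Debug)]
enum ToyRequest {
    Open(u64),
    Close(u64),
}

#[derive(Debug)]
struct ToyReply {
    result: bool,
    error: Option<String>,
}

// Map an instruction to the request that should be executed for it.
fn instruction_to_request(instruction: &ToyInstruction) -> ToyRequest {
    match instruction {
        ToyInstruction::OpenRegion { region_id } => ToyRequest::Open(*region_id),
        ToyInstruction::CloseRegion { region_id } => ToyRequest::Close(*region_id),
    }
}

// Fold the execution outcome into the default reply template.
fn fill_reply(mut template: ToyReply, outcome: Result<(), String>) -> ToyReply {
    template.result = outcome.is_ok();
    template.error = outcome.err();
    template
}

// Pretend only "open" succeeds, to show how errors propagate into the reply.
fn handle(request: &ToyRequest) -> Result<(), String> {
    match request {
        ToyRequest::Open(_) => Ok(()),
        ToyRequest::Close(id) => Err(format!("region {id} is not opened")),
    }
}

fn main() {
    for instruction in [
        ToyInstruction::OpenRegion { region_id: 1 },
        ToyInstruction::CloseRegion { region_id: 2 },
    ] {
        let template = ToyReply { result: false, error: None };
        let request = instruction_to_request(&instruction);
        let reply = fill_reply(template, handle(&request));
        println!("{instruction:?} -> {reply:?}");
    }
}
```

The real handler does the same, except the request is dispatched to `RegionServer::handle_request` on a background task and the filled reply is sent via the heartbeat mailbox.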
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 84daece2426f..10f40bca817f 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -123,15 +123,15 @@ impl Instance {
Ok(match opts.mode {
Mode::Standalone => None,
Mode::Distributed => {
- let node_id = opts.node_id.context(MissingNodeIdSnafu)?;
- let meta_client = meta_client.context(IncorrectInternalStateSnafu {
+ let _node_id = opts.node_id.context(MissingNodeIdSnafu)?;
+ let _meta_client = meta_client.context(IncorrectInternalStateSnafu {
state: "meta client is not provided when building heartbeat task",
})?;
let region_alive_keepers =
region_alive_keepers.context(IncorrectInternalStateSnafu {
state: "region_alive_keepers is not provided when building heartbeat task",
})?;
- let handlers_executor = HandlerGroupExecutor::new(vec![
+ let _handlers_executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
Arc::new(OpenRegionHandler::new(
catalog_manager.clone(),
@@ -146,15 +146,7 @@ impl Instance {
region_alive_keepers.clone(),
]);
- Some(HeartbeatTask::new(
- node_id,
- opts,
- meta_client,
- catalog_manager,
- Arc::new(handlers_executor),
- opts.heartbeat.interval_millis,
- region_alive_keepers,
- ))
+ todo!("remove this method")
}
})
}
@@ -425,7 +417,10 @@ fn create_compaction_scheduler<S: LogStore>(opts: &DatanodeOptions) -> Compactio
}
/// Create metasrv client instance and spawn heartbeat loop.
-async fn new_metasrv_client(node_id: u64, meta_config: &MetaClientOptions) -> Result<MetaClient> {
+pub async fn new_metasrv_client(
+ node_id: u64,
+ meta_config: &MetaClientOptions,
+) -> Result<MetaClient> {
let cluster_id = 0; // TODO(hl): read from config
let member_id = node_id;
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 0719d9e3363b..5be6ed752e45 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -87,6 +87,14 @@ impl RegionServer {
pub async fn handle_read(&self, request: QueryRequest) -> Result<SendableRecordBatchStream> {
self.inner.handle_read(request).await
}
+
+ pub fn opened_region_ids(&self) -> Vec<RegionId> {
+ self.inner.region_map.iter().map(|e| *e.key()).collect()
+ }
+
+ pub fn runtime(&self) -> Arc<Runtime> {
+ self.inner.runtime.clone()
+ }
}
#[async_trait]
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 6a002410957d..35faeb2a877c 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -12,25 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::default::Default;
use std::net::SocketAddr;
use std::sync::Arc;
-use common_runtime::Builder as RuntimeBuilder;
use futures::future;
use servers::grpc::GrpcServer;
use servers::http::{HttpServer, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
-use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
use servers::server::Server;
use snafu::ResultExt;
use crate::datanode::DatanodeOptions;
use crate::error::{
- ParseAddrSnafu, Result, RuntimeResourceSnafu, ShutdownServerSnafu, StartServerSnafu,
- WaitForGrpcServingSnafu,
+ ParseAddrSnafu, Result, ShutdownServerSnafu, StartServerSnafu, WaitForGrpcServingSnafu,
};
-use crate::instance::InstanceRef;
use crate::region_server::RegionServer;
pub mod grpc;
@@ -42,38 +37,19 @@ pub struct Services {
}
impl Services {
- pub async fn try_new(instance: InstanceRef, opts: &DatanodeOptions) -> Result<Self> {
- // TODO(ruihang): remove database service once region server is ready.
- let enable_region_server = option_env!("ENABLE_REGION_SERVER").is_some();
-
- let grpc_runtime = Arc::new(
- RuntimeBuilder::default()
- .worker_threads(opts.rpc_runtime_size)
- .thread_name("grpc-io-handlers")
- .build()
- .context(RuntimeResourceSnafu)?,
- );
-
- let region_server = RegionServer::new(instance.query_engine(), grpc_runtime.clone());
- let flight_handler = if enable_region_server {
- Some(Arc::new(region_server.clone()) as _)
- } else {
- None
- };
- let region_server_handler = if enable_region_server {
- Some(Arc::new(region_server.clone()) as _)
- } else {
- None
- };
+ pub async fn try_new(region_server: RegionServer, opts: &DatanodeOptions) -> Result<Self> {
+ let flight_handler = Some(Arc::new(region_server.clone()) as _);
+ let region_server_handler = Some(Arc::new(region_server.clone()) as _);
+ let runtime = region_server.runtime();
Ok(Self {
grpc_server: GrpcServer::new(
- ServerGrpcQueryHandlerAdaptor::arc(instance),
+ None,
None,
flight_handler,
region_server_handler,
None,
- grpc_runtime,
+ runtime,
),
http_server: HttpServerBuilder::new(opts.http_opts.clone())
.with_metrics_handler(MetricsHandler)
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 41a58d666c55..956f3a6d1503 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -69,7 +69,7 @@ impl Services {
);
let grpc_server = GrpcServer::new(
- ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
+ Some(ServerGrpcQueryHandlerAdaptor::arc(instance.clone())),
Some(instance.clone()),
None,
None,
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index ff0ff5173a3f..4689647c4b1b 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -79,22 +79,23 @@ pub struct GrpcServer {
impl GrpcServer {
pub fn new(
- query_handler: ServerGrpcQueryHandlerRef,
+ query_handler: Option<ServerGrpcQueryHandlerRef>,
prometheus_handler: Option<PrometheusHandlerRef>,
flight_handler: Option<FlightCraftRef>,
region_server_handler: Option<RegionServerHandlerRef>,
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
) -> Self {
- let database_handler =
- GreptimeRequestHandler::new(query_handler, user_provider.clone(), runtime.clone());
- let region_server_handler =
- region_server_handler.map(|handler| RegionServerRequestHandler::new(handler, runtime));
+ let database_handler = query_handler.map(|handler| {
+ GreptimeRequestHandler::new(handler, user_provider.clone(), runtime.clone())
+ });
+ let region_server_handler = region_server_handler
+ .map(|handler| RegionServerRequestHandler::new(handler, runtime.clone()));
Self {
shutdown_tx: Mutex::new(None),
user_provider,
serve_state: Mutex::new(None),
- database_handler: Some(database_handler),
+ database_handler,
prometheus_handler,
flight_handler,
region_server_handler,
diff --git a/src/table/src/engine/manager.rs b/src/table/src/engine/manager.rs
index f89046ce22c6..e642d3aebbf7 100644
--- a/src/table/src/engine/manager.rs
+++ b/src/table/src/engine/manager.rs
@@ -51,6 +51,17 @@ impl MemoryTableEngineManager {
MemoryTableEngineManager::alias(engine.name().to_string(), engine)
}
+ // TODO: remove `TableEngineManager`
+ pub fn new_empty() -> Self {
+ let engines = RwLock::new(HashMap::new());
+ let engine_procedures = RwLock::new(HashMap::new());
+
+ MemoryTableEngineManager {
+ engines,
+ engine_procedures,
+ }
+ }
+
/// Create a new [MemoryTableEngineManager] with single table `engine` and
/// an alias `name` instead of the engine's name.
pub fn alias(name: String, engine: TableEngineRef) -> Self {
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 350a6a27c197..8659054e8aa0 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -294,7 +294,7 @@ async fn create_datanode_client(datanode_instance: Arc<DatanodeInstance>) -> (St
runtime.clone(),
));
let grpc_server = GrpcServer::new(
- ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
+ Some(ServerGrpcQueryHandlerAdaptor::arc(datanode_instance)),
None,
Some(query_handler),
None,
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index c152ec6acb30..9727980ff7db 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -590,7 +590,7 @@ pub async fn setup_grpc_server_with_user_provider(
runtime.clone(),
));
let fe_grpc_server = Arc::new(GrpcServer::new(
- ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone()),
+ Some(ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone())),
Some(fe_instance_ref.clone()),
Some(flight_handler),
None,
|
feat
|
implement heartbeat for region server (#2279)
|
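The GrpcServer change in the diff above turns the query handler into an Option and only builds the database handler when one is supplied, via `query_handler.map(...)`. Below is a minimal, self-contained sketch of that wiring pattern; QueryHandler, RequestHandler and Server here are hypothetical stand-ins, not the real servers-crate types.

// Minimal sketch of the Option-based handler wiring: a sub-handler is only
// constructed when the corresponding handler reference is supplied.
use std::sync::Arc;

trait QueryHandler: Send + Sync {
    fn name(&self) -> &str;
}

struct RequestHandler {
    inner: Arc<dyn QueryHandler>,
}

struct Server {
    // Stays `None` when the service is not enabled, mirroring `database_handler`.
    database_handler: Option<RequestHandler>,
}

impl Server {
    fn new(query_handler: Option<Arc<dyn QueryHandler>>) -> Self {
        // Mirrors `query_handler.map(|handler| GreptimeRequestHandler::new(...))`.
        let database_handler = query_handler.map(|inner| RequestHandler { inner });
        Self { database_handler }
    }
}

fn main() {
    struct Nop;
    impl QueryHandler for Nop {
        fn name(&self) -> &str {
            "nop"
        }
    }
    let handler: Arc<dyn QueryHandler> = Arc::new(Nop);
    let with = Server::new(Some(handler));
    let without = Server::new(None);
    println!(
        "enabled: {}, disabled: {}",
        with.database_handler.is_some(),
        without.database_handler.is_some()
    );
}

The same `.map(...)` shape is applied to the region server handler in the diff, so a server can be assembled with any subset of its services.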
4a4237115a1672b7c142c0d9de3b0afdb8269d9f
|
2024-06-25 11:51:19
|
Yingwen
|
test: wait until checkpoint finish (#4202)
| false
|
diff --git a/src/mito2/src/manifest/tests/checkpoint.rs b/src/mito2/src/manifest/tests/checkpoint.rs
index 40dd3694627f..692f40422b17 100644
--- a/src/mito2/src/manifest/tests/checkpoint.rs
+++ b/src/mito2/src/manifest/tests/checkpoint.rs
@@ -172,6 +172,11 @@ async fn test_corrupted_data_causing_checksum_error() {
manager.update(nop_action()).await.unwrap();
}
+ // Wait for the checkpoint to finish.
+ while manager.checkpointer().is_doing_checkpoint() {
+ tokio::time::sleep(Duration::from_millis(50)).await;
+ }
+
// Check if there is a checkpoint
assert!(manager
.store()
|
test
|
wait until checkpoint finish (#4202)
|
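The test change above waits for the checkpoint by polling `is_doing_checkpoint()` with a short sleep. A generic wait-until helper with a timeout expresses the same idea; this is only a sketch assuming a tokio runtime, not part of the mito2 test code.

// Sketch only: a polling wait-until helper with a deadline.
use std::time::Duration;

async fn wait_until<F: Fn() -> bool>(cond: F, timeout: Duration) -> bool {
    let deadline = tokio::time::Instant::now() + timeout;
    while !cond() {
        if tokio::time::Instant::now() >= deadline {
            // Condition never became true within the timeout.
            return false;
        }
        tokio::time::sleep(Duration::from_millis(50)).await;
    }
    true
}

#[tokio::main]
async fn main() {
    // Trivial condition so the example terminates immediately.
    assert!(wait_until(|| true, Duration::from_secs(1)).await);
}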
62d8bbb10c972ea3109d6268ab1671728363a5cc
|
2024-03-26 09:34:57
|
Yingwen
|
ci: use single commit on the deployment branch (#3580)
| false
|
diff --git a/.github/workflows/apidoc.yml b/.github/workflows/apidoc.yml
index 06e845f63021..ca1befa52cff 100644
--- a/.github/workflows/apidoc.yml
+++ b/.github/workflows/apidoc.yml
@@ -40,3 +40,4 @@ jobs:
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
+ single-commit: true
|
ci
|
use single commit on the deployment branch (#3580)
|
1fa0b4e3f972713e9bc1685afabf5e1a7077a433
|
2022-04-21 09:10:46
|
evenyag
|
chore: Setup code skeleton of datanode
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c4b261287516..64e3fa91ca44 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,6 +2,23 @@
# It is not intended for manual editing.
version = 3
+[[package]]
+name = "async-trait"
+version = "0.1.53"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
[[package]]
name = "common"
version = "0.1.0"
@@ -9,23 +26,136 @@ version = "0.1.0"
[[package]]
name = "datanode"
version = "0.1.0"
+dependencies = [
+ "snafu",
+]
[[package]]
name = "datatypes"
version = "0.1.0"
+[[package]]
+name = "doc-comment"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
+
+[[package]]
+name = "heck"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
+dependencies = [
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "log"
+version = "0.4.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8"
+dependencies = [
+ "cfg-if",
+]
+
[[package]]
name = "log-store"
version = "0.1.0"
+[[package]]
+name = "logical-plans"
+version = "0.1.0"
+
[[package]]
name = "object-store"
version = "0.1.0"
+[[package]]
+name = "proc-macro2"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1"
+dependencies = [
+ "unicode-xid",
+]
+
[[package]]
name = "query"
version = "0.1.0"
+[[package]]
+name = "quote"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "snafu"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2eba135d2c579aa65364522eb78590cdf703176ef71ad4c32b00f58f7afb2df5"
+dependencies = [
+ "doc-comment",
+ "snafu-derive",
+]
+
+[[package]]
+name = "snafu-derive"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a7fe9b0669ef117c5cabc5549638528f36771f058ff977d7689deb517833a75"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "sql"
+version = "0.1.0"
+dependencies = [
+ "sqlparser",
+]
+
+[[package]]
+name = "sqlparser"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e9a527b68048eb95495a1508f6c8395c8defcff5ecdbe8ad4106d08a2ef2a3c"
+dependencies = [
+ "log",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.91"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
[[package]]
name = "table"
version = "0.1.0"
+dependencies = [
+ "async-trait",
+]
+
+[[package]]
+name = "unicode-segmentation"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
diff --git a/Cargo.toml b/Cargo.toml
index 7da6ac97a7de..b63440d6e3f6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,7 +4,9 @@ members = [
"src/datanode",
"src/datatypes",
"src/log-store",
+ "src/logical-plans",
"src/object-store",
"src/query",
+ "src/sql",
"src/table",
]
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index c8417dd79907..ec6162f9688f 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -6,3 +6,4 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
+snafu = "0.7"
diff --git a/src/datanode/src/catalog.rs b/src/datanode/src/catalog.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/datanode/src/catalog.rs
@@ -0,0 +1 @@
+
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
new file mode 100644
index 000000000000..d7815b56db7b
--- /dev/null
+++ b/src/datanode/src/error.rs
@@ -0,0 +1,8 @@
+use snafu::Snafu;
+
+/// Business error of the datanode.
+#[derive(Debug, Snafu)]
+#[snafu(display("DataNode error"))]
+pub struct Error;
+
+pub type Result<T> = std::result::Result<T, Error>;
diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs
index 8b137891791f..f12076ea061e 100644
--- a/src/datanode/src/lib.rs
+++ b/src/datanode/src/lib.rs
@@ -1 +1,21 @@
+mod catalog;
+mod error;
+mod processors;
+mod rpc;
+use crate::error::Result;
+use crate::rpc::Services;
+
+/// DataNode service.
+pub struct DataNode {
+ services: Services,
+}
+
+impl DataNode {
+ /// Shutdown the datanode service gracefully.
+ pub async fn shutdown(&self) -> Result<()> {
+ self.services.shutdown().await?;
+
+ unimplemented!()
+ }
+}
diff --git a/src/datanode/src/processors.rs b/src/datanode/src/processors.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/datanode/src/processors.rs
@@ -0,0 +1 @@
+
diff --git a/src/datanode/src/rpc.rs b/src/datanode/src/rpc.rs
new file mode 100644
index 000000000000..ef06c5b171c6
--- /dev/null
+++ b/src/datanode/src/rpc.rs
@@ -0,0 +1,10 @@
+use crate::error::Result;
+
+/// All rpc services.
+pub struct Services {}
+
+impl Services {
+ pub async fn shutdown(&self) -> Result<()> {
+ unimplemented!()
+ }
+}
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs
index e69de29bb2d1..6bde67a2b144 100644
--- a/src/datatypes/src/lib.rs
+++ b/src/datatypes/src/lib.rs
@@ -0,0 +1 @@
+mod schema;
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/datatypes/src/schema.rs
@@ -0,0 +1 @@
+
diff --git a/src/logical-plans/Cargo.toml b/src/logical-plans/Cargo.toml
new file mode 100644
index 000000000000..fd20e3350bca
--- /dev/null
+++ b/src/logical-plans/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "logical-plans"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
diff --git a/src/logical-plans/src/lib.rs b/src/logical-plans/src/lib.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/logical-plans/src/lib.rs
@@ -0,0 +1 @@
+
diff --git a/src/query/src/executor.rs b/src/query/src/executor.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/query/src/executor.rs
@@ -0,0 +1 @@
+
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index 8b137891791f..c7de888be0fb 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -1 +1,4 @@
-
+mod executor;
+mod logical_optimizer;
+mod physical_optimizer;
+mod physical_planner;
diff --git a/src/query/src/logical_optimizer.rs b/src/query/src/logical_optimizer.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/query/src/logical_optimizer.rs
@@ -0,0 +1 @@
+
diff --git a/src/query/src/physical_optimizer.rs b/src/query/src/physical_optimizer.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/query/src/physical_optimizer.rs
@@ -0,0 +1 @@
+
diff --git a/src/query/src/physical_planner.rs b/src/query/src/physical_planner.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/query/src/physical_planner.rs
@@ -0,0 +1 @@
+
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
new file mode 100644
index 000000000000..01cdc0da6dea
--- /dev/null
+++ b/src/sql/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "sql"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+sqlparser = "0.16.0"
diff --git a/src/sql/src/ast.rs b/src/sql/src/ast.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/sql/src/ast.rs
@@ -0,0 +1 @@
+
diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs
new file mode 100644
index 000000000000..c289ce327fe3
--- /dev/null
+++ b/src/sql/src/lib.rs
@@ -0,0 +1,3 @@
+mod ast;
+mod parser;
+mod planner;
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/sql/src/parser.rs
@@ -0,0 +1 @@
+
diff --git a/src/sql/src/planner.rs b/src/sql/src/planner.rs
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/sql/src/planner.rs
@@ -0,0 +1 @@
+
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 12933c68f0bf..6ee53b9861f1 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -6,3 +6,4 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
+async-trait = "0.1"
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
new file mode 100644
index 000000000000..8b2309192c5c
--- /dev/null
+++ b/src/table/src/engine.rs
@@ -0,0 +1,3 @@
+/// Table engine abstraction.
+#[async_trait::async_trait]
+pub trait Engine {}
diff --git a/src/table/src/lib.rs b/src/table/src/lib.rs
index 8b137891791f..804f6c9a6844 100644
--- a/src/table/src/lib.rs
+++ b/src/table/src/lib.rs
@@ -1 +1,5 @@
+mod engine;
+/// Table abstraction.
+#[async_trait::async_trait]
+pub trait Table: Send + Sync {}
|
chore
|
Setup code skeleton of datanode
|
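The skeleton above introduces `#[async_trait]`-based `Engine` and `Table` traits with empty bodies. For illustration, here is a sketch of how such an async trait object is typically implemented and called once methods are added; the `Echo` table and its `scan` method are hypothetical, and the sketch assumes the async-trait and tokio crates.

// Sketch only: implementing and calling an #[async_trait] trait object.
use async_trait::async_trait;

#[async_trait]
pub trait Table: Send + Sync {
    async fn scan(&self) -> Vec<String>;
}

struct Echo;

#[async_trait]
impl Table for Echo {
    async fn scan(&self) -> Vec<String> {
        vec!["row-1".to_string(), "row-2".to_string()]
    }
}

#[tokio::main]
async fn main() {
    // Trait objects work because async_trait boxes the returned futures.
    let table: Box<dyn Table> = Box::new(Echo);
    println!("{:?}", table.scan().await);
}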
0847ff36ce49c3c560c7a828378c49ce010817b1
|
2024-09-18 13:11:25
|
zyy17
|
fix: config test failed and use `similar_asserts::assert_eq` to replace `assert_eq` for long string compare (#4731)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 0e9db0052712..b121698979e5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1175,6 +1175,17 @@ dependencies = [
"regex-automata 0.1.10",
]
+[[package]]
+name = "bstr"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
+dependencies = [
+ "memchr",
+ "regex-automata 0.4.7",
+ "serde",
+]
+
[[package]]
name = "btoi"
version = "0.4.3"
@@ -1761,6 +1772,7 @@ dependencies = [
"serde_json",
"servers",
"session",
+ "similar-asserts",
"snafu 0.8.4",
"store-api",
"substrait 0.9.3",
@@ -9657,7 +9669,7 @@ source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b
dependencies = [
"ascii",
"bitflags 1.3.2",
- "bstr",
+ "bstr 0.2.17",
"cfg-if",
"hexf-parse",
"itertools 0.10.5",
@@ -9692,7 +9704,7 @@ version = "0.2.0"
source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
dependencies = [
"bitflags 1.3.2",
- "bstr",
+ "bstr 0.2.17",
"itertools 0.10.5",
"lz4_flex 0.9.5",
"num-bigint",
@@ -9845,7 +9857,7 @@ dependencies = [
"ascii",
"atty",
"bitflags 1.3.2",
- "bstr",
+ "bstr 0.2.17",
"caseless",
"cfg-if",
"chrono",
@@ -10613,6 +10625,26 @@ version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"
+[[package]]
+name = "similar"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e"
+dependencies = [
+ "bstr 1.10.0",
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "similar-asserts"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e"
+dependencies = [
+ "console",
+ "similar",
+]
+
[[package]]
name = "simple_asn1"
version = "0.6.2"
@@ -11801,6 +11833,7 @@ dependencies = [
"serde_json",
"servers",
"session",
+ "similar-asserts",
"snafu 0.8.4",
"sql",
"sqlx",
diff --git a/Cargo.toml b/Cargo.toml
index 672067140297..3854ebc08459 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -169,6 +169,7 @@ shadow-rs = "0.31"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
+similar-asserts = "1.6.0"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index 71841aabeccb..b57d2211875b 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -70,6 +70,7 @@ serde.workspace = true
serde_json.workspace = true
servers.workspace = true
session.workspace = true
+similar-asserts.workspace = true
snafu.workspace = true
store-api.workspace = true
substrait.workspace = true
diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs
index 68d67a53a488..7a50234431e2 100644
--- a/src/cmd/tests/load_config_test.rs
+++ b/src/cmd/tests/load_config_test.rs
@@ -16,12 +16,10 @@ use std::time::Duration;
use cmd::options::GreptimeOptions;
use cmd::standalone::StandaloneOptions;
-use common_base::readable_size::ReadableSize;
use common_config::Configurable;
use common_grpc::channel_manager::{
DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
-use common_runtime::global::RuntimeOptions;
use common_telemetry::logging::{LoggingOptions, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
@@ -45,10 +43,6 @@ fn test_load_datanode_example_config() {
.unwrap();
let expected = GreptimeOptions::<DatanodeOptions> {
- runtime: RuntimeOptions {
- global_rt_size: 8,
- compact_rt_size: 4,
- },
component: DatanodeOptions {
node_id: Some(42),
meta_client: Some(MetaClientOptions {
@@ -76,8 +70,6 @@ fn test_load_datanode_example_config() {
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
scan_parallelism: 0,
- global_write_buffer_reject_size: ReadableSize::gb(2),
- max_background_jobs: 4,
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
..Default::default()
}),
@@ -102,9 +94,10 @@ fn test_load_datanode_example_config() {
rpc_max_send_message_size: Some(DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE),
..Default::default()
},
+ ..Default::default()
};
- assert_eq!(options, expected);
+ similar_asserts::assert_eq!(options, expected);
}
#[test]
@@ -114,10 +107,6 @@ fn test_load_frontend_example_config() {
GreptimeOptions::<FrontendOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<FrontendOptions> {
- runtime: RuntimeOptions {
- global_rt_size: 8,
- compact_rt_size: 4,
- },
component: FrontendOptions {
default_timezone: Some("UTC".to_string()),
meta_client: Some(MetaClientOptions {
@@ -150,8 +139,9 @@ fn test_load_frontend_example_config() {
},
..Default::default()
},
+ ..Default::default()
};
- assert_eq!(options, expected);
+ similar_asserts::assert_eq!(options, expected);
}
#[test]
@@ -161,10 +151,6 @@ fn test_load_metasrv_example_config() {
GreptimeOptions::<MetasrvOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<MetasrvOptions> {
- runtime: RuntimeOptions {
- global_rt_size: 8,
- compact_rt_size: 4,
- },
component: MetasrvOptions {
selector: SelectorType::default(),
data_home: "/tmp/metasrv/".to_string(),
@@ -182,8 +168,9 @@ fn test_load_metasrv_example_config() {
},
..Default::default()
},
+ ..Default::default()
};
- assert_eq!(options, expected);
+ similar_asserts::assert_eq!(options, expected);
}
#[test]
@@ -193,10 +180,6 @@ fn test_load_standalone_example_config() {
GreptimeOptions::<StandaloneOptions>::load_layered_options(example_config.to_str(), "")
.unwrap();
let expected = GreptimeOptions::<StandaloneOptions> {
- runtime: RuntimeOptions {
- global_rt_size: 8,
- compact_rt_size: 4,
- },
component: StandaloneOptions {
default_timezone: Some("UTC".to_string()),
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
@@ -208,11 +191,8 @@ fn test_load_standalone_example_config() {
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
- scan_parallelism: 0,
- global_write_buffer_reject_size: ReadableSize::gb(2),
- sst_meta_cache_size: ReadableSize::mb(128),
- max_background_jobs: 4,
experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+ scan_parallelism: 0,
..Default::default()
}),
RegionEngineConfig::File(EngineConfig {}),
@@ -234,6 +214,7 @@ fn test_load_standalone_example_config() {
},
..Default::default()
},
+ ..Default::default()
};
- assert_eq!(options, expected);
+ similar_asserts::assert_eq!(options, expected);
}
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 997214bca00c..80fb76c55aac 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -60,6 +60,7 @@ rstest_reuse.workspace = true
serde_json.workspace = true
servers = { workspace = true, features = ["testing"] }
session.workspace = true
+similar-asserts.workspace = true
snafu.workspace = true
sql.workspace = true
sqlx = { version = "0.6", features = [
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index d467e42dd4dd..e11060fbbda5 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -888,7 +888,7 @@ write_interval = "30s"
.trim()
.to_string();
let body_text = drop_lines_with_inconsistent_results(res_get.text().await);
- assert_eq!(body_text, expected_toml_str);
+ similar_asserts::assert_eq!(body_text, expected_toml_str);
}
fn drop_lines_with_inconsistent_results(input: String) -> String {
|
fix
|
config test failed and use `similar_asserts::assert_eq` to replace `assert_eq` for long string compare (#4731)
|
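The switch above to `similar_asserts::assert_eq!` matters mostly for failure output: on a mismatch it prints a line-level diff instead of dumping both long strings. A tiny standalone sketch, assuming `similar-asserts` is available as a dependency:

// Sketch only; assumes similar-asserts = "1" in [dev-dependencies].
fn main() {
    let expected = "a\nfairly\nlong\nexpected\nconfig\n";
    let actual = "a\nfairly\nlong\nexpected\nconfig\n";
    // Passes here; change one line in `actual` to see the diff-style failure output.
    similar_asserts::assert_eq!(expected, actual);
}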
033b650d0ddea3796051fa4f7437941346e2678a
|
2023-08-19 18:38:44
|
JeremyHi
|
feat: row write protocol (#2189)
| false
|
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index fa8fe002fe7d..a465a4114e92 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -23,6 +23,7 @@ use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::VectorRef;
use datatypes::schema::SchemaRef;
use snafu::{ensure, ResultExt};
+use table::engine::TableReference;
use table::metadata::TableId;
use table::requests::InsertRequest;
@@ -47,12 +48,11 @@ pub fn build_create_expr_from_insertion(
columns: &[Column],
engine: &str,
) -> Result<CreateTableExpr> {
+ let table_name = TableReference::full(catalog_name, schema_name, table_name);
let column_exprs = ColumnExpr::from_columns(columns);
util::build_create_table_expr(
- catalog_name,
- schema_name,
table_id,
- table_name,
+ &table_name,
column_exprs,
engine,
"Created on insertion",
diff --git a/src/common/grpc-expr/src/lib.rs b/src/common/grpc-expr/src/lib.rs
index 61da4fe1957d..cd3d9540b3eb 100644
--- a/src/common/grpc-expr/src/lib.rs
+++ b/src/common/grpc-expr/src/lib.rs
@@ -16,7 +16,7 @@ mod alter;
pub mod delete;
pub mod error;
pub mod insert;
-mod util;
+pub mod util;
pub use alter::{alter_expr_to_request, create_expr_to_request, create_table_schema};
pub use insert::{build_create_expr_from_insertion, find_new_columns};
diff --git a/src/common/grpc-expr/src/util.rs b/src/common/grpc-expr/src/util.rs
index 89e6b2dd278e..806f848173ee 100644
--- a/src/common/grpc-expr/src/util.rs
+++ b/src/common/grpc-expr/src/util.rs
@@ -19,39 +19,45 @@ use api::v1::{
};
use datatypes::schema::Schema;
use snafu::{ensure, OptionExt};
+use table::engine::TableReference;
use table::metadata::TableId;
use crate::error::{
DuplicatedColumnNameSnafu, DuplicatedTimestampColumnSnafu, MissingTimestampColumnSnafu, Result,
};
-pub struct ColumnExpr {
- pub column_name: String,
+pub struct ColumnExpr<'a> {
+ pub column_name: &'a str,
pub datatype: i32,
pub semantic_type: i32,
}
-impl ColumnExpr {
+impl<'a> ColumnExpr<'a> {
#[inline]
- pub fn from_columns(columns: &[Column]) -> Vec<Self> {
+ pub fn from_columns(columns: &'a [Column]) -> Vec<Self> {
columns.iter().map(Self::from).collect()
}
+
+ #[inline]
+ pub fn from_column_schemas(schemas: &'a [ColumnSchema]) -> Vec<Self> {
+ schemas.iter().map(Self::from).collect()
+ }
}
-impl From<&Column> for ColumnExpr {
- fn from(column: &Column) -> Self {
+impl<'a> From<&'a Column> for ColumnExpr<'a> {
+ fn from(column: &'a Column) -> Self {
Self {
- column_name: column.column_name.clone(),
+ column_name: &column.column_name,
datatype: column.datatype,
semantic_type: column.semantic_type,
}
}
}
-impl From<&ColumnSchema> for ColumnExpr {
- fn from(schema: &ColumnSchema) -> Self {
+impl<'a> From<&'a ColumnSchema> for ColumnExpr<'a> {
+ fn from(schema: &'a ColumnSchema) -> Self {
Self {
- column_name: schema.column_name.clone(),
+ column_name: &schema.column_name,
datatype: schema.datatype,
semantic_type: schema.semantic_type,
}
@@ -59,10 +65,8 @@ impl From<&ColumnSchema> for ColumnExpr {
}
pub fn build_create_table_expr(
- catalog_name: &str,
- schema_name: &str,
table_id: Option<TableId>,
- table_name: &str,
+ table_name: &TableReference<'_>,
column_exprs: Vec<ColumnExpr>,
engine: &str,
desc: &str,
@@ -77,8 +81,8 @@ pub fn build_create_table_expr(
let mut distinct_names = HashSet::with_capacity(column_exprs.len());
for ColumnExpr { column_name, .. } in &column_exprs {
ensure!(
- distinct_names.insert(column_name),
- DuplicatedColumnNameSnafu { name: column_name }
+ distinct_names.insert(*column_name),
+ DuplicatedColumnNameSnafu { name: *column_name }
);
}
@@ -94,16 +98,16 @@ pub fn build_create_table_expr(
{
let mut is_nullable = true;
match semantic_type {
- v if v == SemanticType::Tag as i32 => primary_keys.push(column_name.clone()),
+ v if v == SemanticType::Tag as i32 => primary_keys.push(column_name.to_string()),
v if v == SemanticType::Timestamp as i32 => {
ensure!(
time_index.is_none(),
DuplicatedTimestampColumnSnafu {
exists: time_index.unwrap(),
- duplicated: &column_name,
+ duplicated: column_name,
}
);
- time_index = Some(column_name.clone());
+ time_index = Some(column_name.to_string());
// Timestamp column must not be null.
is_nullable = false;
}
@@ -111,7 +115,7 @@ pub fn build_create_table_expr(
}
let column_def = ColumnDef {
- name: column_name,
+ name: column_name.to_string(),
datatype,
is_nullable,
default_constraint: vec![],
@@ -120,13 +124,13 @@ pub fn build_create_table_expr(
}
let time_index = time_index.context(MissingTimestampColumnSnafu {
- msg: format!("table is {}", table_name),
+ msg: format!("table is {}", table_name.table),
})?;
let expr = CreateTableExpr {
- catalog_name: catalog_name.to_string(),
- schema_name: schema_name.to_string(),
- table_name: table_name.to_string(),
+ catalog_name: table_name.catalog.to_string(),
+ schema_name: table_name.schema.to_string(),
+ table_name: table_name.table.to_string(),
desc: desc.to_string(),
column_defs,
time_index,
@@ -148,11 +152,11 @@ pub fn extract_new_columns(
) -> Result<Option<AddColumns>> {
let columns_to_add = column_exprs
.into_iter()
- .filter(|expr| schema.column_schema_by_name(&expr.column_name).is_none())
+ .filter(|expr| schema.column_schema_by_name(expr.column_name).is_none())
.map(|expr| {
let is_key = expr.semantic_type == SemanticType::Tag as i32;
let column_def = Some(ColumnDef {
- name: expr.column_name,
+ name: expr.column_name.to_string(),
datatype: expr.datatype,
is_nullable: true,
default_constraint: vec![],
@@ -170,7 +174,7 @@ pub fn extract_new_columns(
} else {
let mut distinct_names = HashSet::with_capacity(columns_to_add.len());
for add_column in &columns_to_add {
- let name = &add_column.column_def.as_ref().unwrap().name;
+ let name = add_column.column_def.as_ref().unwrap().name.as_str();
ensure!(
distinct_names.insert(name),
DuplicatedColumnNameSnafu { name }
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 7cd3a6eca9a8..0b4186a3e998 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -478,6 +478,31 @@ pub enum Error {
location: Location,
},
+ #[snafu(display(
+ "Invalid insert row len, table: {}, expected: {}, actual: {}",
+ table_name,
+ expected,
+ actual
+ ))]
+ InvalidInsertRowLen {
+ table_name: String,
+ expected: usize,
+ actual: usize,
+ location: Location,
+ },
+
+ #[snafu(display("Column datatype error, source: {}", source))]
+ ColumnDataType {
+ location: Location,
+ source: api::error::Error,
+ },
+
+ #[snafu(display("Failed to create vector, source: {}", source))]
+ CreateVector {
+ location: Location,
+ source: datatypes::error::Error,
+ },
+
#[snafu(display("Unexpected, violated: {}", violated))]
Unexpected {
violated: String,
@@ -557,6 +582,7 @@ impl ErrorExt for Error {
TableEngineNotFound { source, .. } | EngineProcedureNotFound { source, .. } => {
source.status_code()
}
+ CreateVector { source, .. } => source.status_code(),
TableNotFound { .. } => StatusCode::TableNotFound,
ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
@@ -583,7 +609,9 @@ impl ErrorExt for Error {
| MissingMetasrvOpts { .. }
| ColumnNoneDefaultValue { .. }
| MissingWalDirConfig { .. }
- | PrepareImmutableTable { .. } => StatusCode::InvalidArguments,
+ | PrepareImmutableTable { .. }
+ | InvalidInsertRowLen { .. }
+ | ColumnDataType { .. } => StatusCode::InvalidArguments,
EncodeJson { .. } | DecodeJson { .. } | PayloadNotExist { .. } | Unexpected { .. } => {
StatusCode::Unexpected
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index e7d4cbbf61f4..84daece2426f 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -67,6 +67,7 @@ use crate::greptimedb_telemetry::get_greptimedb_telemetry_task;
use crate::heartbeat::handler::close_region::CloseRegionHandler;
use crate::heartbeat::handler::open_region::OpenRegionHandler;
use crate::heartbeat::HeartbeatTask;
+use crate::row_inserter::RowInserter;
use crate::sql::{SqlHandler, SqlRequest};
use crate::store;
@@ -81,6 +82,7 @@ pub struct Instance {
pub(crate) sql_handler: SqlHandler,
pub(crate) catalog_manager: CatalogManagerRef,
pub(crate) table_id_provider: Option<TableIdProviderRef>,
+ row_inserter: RowInserter,
procedure_manager: ProcedureManagerRef,
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
}
@@ -279,10 +281,14 @@ impl Instance {
plugins,
);
let query_engine = factory.query_engine();
-
let procedure_manager =
create_procedure_manager(opts.node_id.unwrap_or(0), &opts.procedure, object_store)
.await?;
+ let sql_handler = SqlHandler::new(
+ engine_manager.clone(),
+ catalog_manager.clone(),
+ procedure_manager.clone(),
+ );
// Register all procedures.
// Register procedures of the mito engine.
mito_engine.register_procedure_loaders(&*procedure_manager);
@@ -295,23 +301,22 @@ impl Instance {
mito_engine.clone(),
&*procedure_manager,
);
+ let row_inserter = RowInserter::new(catalog_manager.clone());
+ let greptimedb_telemetry_task = get_greptimedb_telemetry_task(
+ Some(opts.storage.data_home.clone()),
+ &opts.mode,
+ opts.enable_telemetry,
+ )
+ .await;
let instance = Arc::new(Self {
query_engine: query_engine.clone(),
- sql_handler: SqlHandler::new(
- engine_manager.clone(),
- catalog_manager.clone(),
- procedure_manager.clone(),
- ),
+ sql_handler,
catalog_manager: catalog_manager.clone(),
table_id_provider,
+ row_inserter,
procedure_manager,
- greptimedb_telemetry_task: get_greptimedb_telemetry_task(
- Some(opts.storage.data_home.clone()),
- &opts.mode,
- opts.enable_telemetry,
- )
- .await,
+ greptimedb_telemetry_task,
});
let heartbeat_task = Instance::build_heartbeat_task(
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index f857e13f3fd9..2d9e527f694b 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
-use api::v1::{CreateDatabaseExpr, DdlRequest, DeleteRequests, InsertRequests};
+use api::v1::{CreateDatabaseExpr, DdlRequest, DeleteRequests, InsertRequests, RowInsertRequests};
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_grpc_expr::insert::to_table_insert_request;
@@ -129,7 +129,7 @@ impl Instance {
pub async fn handle_inserts(
&self,
requests: InsertRequests,
- ctx: &QueryContextRef,
+ ctx: QueryContextRef,
) -> Result<Output> {
let results = future::try_join_all(requests.inserts.into_iter().map(|insert| {
let catalog_manager = self.catalog_manager.clone();
@@ -164,6 +164,14 @@ impl Instance {
Ok(Output::AffectedRows(affected_rows))
}
+ pub async fn handle_row_inserts(
+ &self,
+ requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.row_inserter.handle_inserts(requests, ctx).await
+ }
+
async fn handle_deletes(
&self,
request: DeleteRequests,
@@ -221,7 +229,7 @@ impl GrpcQueryHandler for Instance {
async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
match request {
- Request::Inserts(requests) => self.handle_inserts(requests, &ctx).await,
+ Request::Inserts(requests) => self.handle_inserts(requests, ctx).await,
Request::Deletes(request) => self.handle_deletes(request, ctx).await,
Request::Query(query_request) => {
let query = query_request
@@ -232,8 +240,9 @@ impl GrpcQueryHandler for Instance {
self.handle_query(query, ctx).await
}
Request::Ddl(request) => self.handle_ddl(request, ctx).await,
- Request::RowInserts(_) | Request::RowDeletes(_) => UnsupportedGrpcRequestSnafu {
- kind: "row insert/delete",
+ Request::RowInserts(requests) => self.handle_row_inserts(requests, ctx).await,
+ Request::RowDeletes(_) => UnsupportedGrpcRequestSnafu {
+ kind: "row deletes",
}
.fail(),
}
diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs
index 9097d680c110..f5b313188ba1 100644
--- a/src/datanode/src/lib.rs
+++ b/src/datanode/src/lib.rs
@@ -24,6 +24,7 @@ pub mod metrics;
#[cfg(any(test, feature = "testing"))]
mod mock;
pub mod region_server;
+mod row_inserter;
pub mod server;
pub mod sql;
mod store;
diff --git a/src/datanode/src/row_inserter.rs b/src/datanode/src/row_inserter.rs
new file mode 100644
index 000000000000..8b2fde825d2c
--- /dev/null
+++ b/src/datanode/src/row_inserter.rs
@@ -0,0 +1,143 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::helper;
+use api::helper::ColumnDataTypeWrapper;
+use api::v1::{RowInsertRequest, RowInsertRequests};
+use catalog::CatalogManagerRef;
+use common_query::Output;
+use datatypes::data_type::{ConcreteDataType, DataType};
+use futures_util::future;
+use session::context::QueryContextRef;
+use snafu::{ensure, OptionExt, ResultExt};
+use table::requests::InsertRequest;
+
+use crate::error::{
+ CatalogSnafu, ColumnDataTypeSnafu, CreateVectorSnafu, InsertSnafu, InvalidInsertRowLenSnafu,
+ JoinTaskSnafu, Result, TableNotFoundSnafu,
+};
+
+pub struct RowInserter {
+ catalog_manager: CatalogManagerRef,
+}
+
+impl RowInserter {
+ pub fn new(catalog_manager: CatalogManagerRef) -> Self {
+ Self { catalog_manager }
+ }
+
+ pub async fn handle_inserts(
+ &self,
+ requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ let insert_tasks = requests.inserts.into_iter().map(|insert| {
+ let catalog_manager = self.catalog_manager.clone();
+ let catalog_name = ctx.current_catalog().to_owned();
+ let schema_name = ctx.current_schema().to_owned();
+ let table_name = insert.table_name.clone();
+
+ let insert_task = async move {
+ let Some(request) =
+ convert_to_table_insert_request(&catalog_name, &schema_name, insert)?
+ else {
+ // empty data
+ return Ok(0usize);
+ };
+
+ let table = catalog_manager
+ .table(&catalog_name, &schema_name, &table_name)
+ .await
+ .context(CatalogSnafu)?
+ .with_context(|| TableNotFoundSnafu {
+ table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
+ })?;
+
+ table.insert(request).await.with_context(|_| InsertSnafu {
+ table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
+ })
+ };
+
+ common_runtime::spawn_write(insert_task)
+ });
+
+ let results = future::try_join_all(insert_tasks)
+ .await
+ .context(JoinTaskSnafu)?;
+ let affected_rows = results.into_iter().sum::<Result<usize>>()?;
+
+ Ok(Output::AffectedRows(affected_rows))
+ }
+}
+
+fn convert_to_table_insert_request(
+ catalog_name: &str,
+ schema_name: &str,
+ request: RowInsertRequest,
+) -> Result<Option<InsertRequest>> {
+ let table_name = request.table_name;
+ let region_number = request.region_number;
+ let Some(rows) = request.rows else {
+ return Ok(None);
+ };
+ let schema = rows.schema;
+ let rows = rows.rows;
+ let num_columns = schema.len();
+ let num_rows = rows.len();
+
+ if num_rows == 0 || num_columns == 0 {
+ return Ok(None);
+ }
+
+ let mut columns_values = Vec::with_capacity(num_columns);
+ for column_schema in schema {
+ let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(column_schema.datatype)
+ .context(ColumnDataTypeSnafu)?
+ .into();
+ let mutable_vector = datatype.create_mutable_vector(num_rows);
+ columns_values.push((column_schema.column_name, mutable_vector));
+ }
+
+ for row in rows {
+ ensure!(
+ row.values.len() == num_columns,
+ InvalidInsertRowLenSnafu {
+ table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
+ expected: num_columns,
+ actual: row.values.len(),
+ }
+ );
+
+ for ((_, mutable_vector), value) in columns_values.iter_mut().zip(row.values.iter()) {
+ mutable_vector
+ .try_push_value_ref(helper::pb_value_to_value_ref(value))
+ .context(CreateVectorSnafu)?;
+ }
+ }
+
+ let columns_values = columns_values
+ .into_iter()
+ .map(|(k, mut v)| (k, v.to_vector()))
+ .collect();
+
+ let insert_request = InsertRequest {
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ table_name,
+ columns_values,
+ region_number,
+ };
+
+ Ok(Some(insert_request))
+}
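`convert_to_table_insert_request` above turns row-oriented protobuf data into per-column vectors: it pre-creates one mutable vector per column, pushes each row's values into them, and rejects rows whose length does not match the schema. A dependency-free sketch of that conversion, with plain `Vec<i64>` columns standing in for the datatypes crate's `MutableVector`s:

// Sketch only: row-to-column pivot with per-row length validation.
fn rows_to_columns(
    column_names: &[&str],
    rows: &[Vec<i64>],
) -> Result<Vec<(String, Vec<i64>)>, String> {
    let num_columns = column_names.len();
    let mut columns: Vec<(String, Vec<i64>)> = column_names
        .iter()
        .map(|name| (name.to_string(), Vec::with_capacity(rows.len())))
        .collect();

    for row in rows {
        // Every row must carry exactly one value per column.
        if row.len() != num_columns {
            return Err(format!("expected {num_columns} values, got {}", row.len()));
        }
        for ((_, column), value) in columns.iter_mut().zip(row.iter()) {
            column.push(*value);
        }
    }
    Ok(columns)
}

fn main() {
    let cols = rows_to_columns(&["ts", "value"], &[vec![1, 10], vec![2, 20]]).unwrap();
    assert_eq!(cols[1].1, vec![10, 20]);
}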
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index efc3ae912a76..72a792891465 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -597,6 +597,9 @@ pub enum Error {
source: auth::error::Error,
location: Location,
},
+
+ #[snafu(display("Empty data: {}", msg))]
+ EmptyData { msg: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -618,7 +621,8 @@ impl ErrorExt for Error {
| Error::PrepareImmutableTable { .. }
| Error::BuildCsvConfig { .. }
| Error::ProjectSchema { .. }
- | Error::UnsupportedFormat { .. } => StatusCode::InvalidArguments,
+ | Error::UnsupportedFormat { .. }
+ | Error::EmptyData { .. } => StatusCode::InvalidArguments,
Error::NotSupported { .. } => StatusCode::Unsupported,
diff --git a/src/frontend/src/expr_factory.rs b/src/frontend/src/expr_factory.rs
index 23c08698762c..164550d4f17a 100644
--- a/src/frontend/src/expr_factory.rs
+++ b/src/frontend/src/expr_factory.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::HashMap;
-use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::alter_expr::Kind;
@@ -22,6 +21,7 @@ use api::v1::{
DropColumns, RenameTable,
};
use common_error::ext::BoxedError;
+use common_grpc_expr::util::ColumnExpr;
use datanode::instance::sql::table_idents_to_full_name;
use datatypes::schema::ColumnSchema;
use file_table_engine::table::immutable::ImmutableFileTableOptions;
@@ -33,49 +33,51 @@ use sql::statements::alter::{AlterTable, AlterTableOperation};
use sql::statements::create::{CreateExternalTable, CreateTable, TIME_INDEX};
use sql::statements::{column_def_to_schema, sql_column_def_to_grpc_column_def};
use sql::util::to_lowercase_options_map;
+use table::engine::TableReference;
use table::requests::{TableOptions, IMMUTABLE_TABLE_META_KEY};
use crate::error::{
- self, BuildCreateExprOnInsertionSnafu, ColumnDataTypeSnafu,
- ConvertColumnDefaultConstraintSnafu, ExternalSnafu, IllegalPrimaryKeysDefSnafu,
- InvalidSqlSnafu, ParseSqlSnafu, Result,
+ BuildCreateExprOnInsertionSnafu, ColumnDataTypeSnafu, ConvertColumnDefaultConstraintSnafu,
+ EncodeJsonSnafu, ExternalSnafu, IllegalPrimaryKeysDefSnafu, InvalidSqlSnafu, NotSupportedSnafu,
+ ParseSqlSnafu, PrepareImmutableTableSnafu, Result, UnrecognizedTableOptionSnafu,
};
-pub type CreateExprFactoryRef = Arc<dyn CreateExprFactory + Send + Sync>;
+#[derive(Debug, Copy, Clone)]
+pub struct CreateExprFactory;
-#[async_trait::async_trait]
-pub trait CreateExprFactory {
- async fn create_expr_by_columns(
+impl CreateExprFactory {
+ pub fn create_table_expr_by_columns(
&self,
- catalog_name: &str,
- schema_name: &str,
- table_name: &str,
+ table_name: &TableReference<'_>,
columns: &[Column],
engine: &str,
- ) -> crate::error::Result<CreateTableExpr>;
-}
+ ) -> Result<CreateTableExpr> {
+ let column_exprs = ColumnExpr::from_columns(columns);
+ let create_expr = common_grpc_expr::util::build_create_table_expr(
+ None,
+ table_name,
+ column_exprs,
+ engine,
+ "Created on insertion",
+ )
+ .context(BuildCreateExprOnInsertionSnafu)?;
-#[derive(Debug)]
-pub struct DefaultCreateExprFactory;
+ Ok(create_expr)
+ }
-#[async_trait::async_trait]
-impl CreateExprFactory for DefaultCreateExprFactory {
- async fn create_expr_by_columns(
+ pub fn create_table_expr_by_column_schemas(
&self,
- catalog_name: &str,
- schema_name: &str,
- table_name: &str,
- columns: &[Column],
+ table_name: &TableReference<'_>,
+ column_schemas: &[api::v1::ColumnSchema],
engine: &str,
) -> Result<CreateTableExpr> {
- let table_id = None;
- let create_expr = common_grpc_expr::build_create_expr_from_insertion(
- catalog_name,
- schema_name,
- table_id,
+ let column_exprs = ColumnExpr::from_column_schemas(column_schemas);
+ let create_expr = common_grpc_expr::util::build_create_table_expr(
+ None,
table_name,
- columns,
+ column_exprs,
engine,
+ "Created on insertion",
)
.context(BuildCreateExprOnInsertionSnafu)?;
@@ -90,18 +92,18 @@ pub(crate) async fn create_external_expr(
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&create.name, query_ctx)
.map_err(BoxedError::new)
- .context(error::ExternalSnafu)?;
+ .context(ExternalSnafu)?;
let mut options = create.options;
let (files, schema) = prepare_immutable_file_table_files_and_schema(&options, &create.columns)
.await
- .context(error::PrepareImmutableTableSnafu)?;
+ .context(PrepareImmutableTableSnafu)?;
let meta = ImmutableFileTableOptions { files };
let _ = options.insert(
IMMUTABLE_TABLE_META_KEY.to_string(),
- serde_json::to_string(&meta).context(error::EncodeJsonSnafu)?,
+ serde_json::to_string(&meta).context(EncodeJsonSnafu)?,
);
let expr = CreateTableExpr {
@@ -126,12 +128,12 @@ pub fn create_to_expr(create: &CreateTable, query_ctx: QueryContextRef) -> Resul
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&create.name, query_ctx)
.map_err(BoxedError::new)
- .context(error::ExternalSnafu)?;
+ .context(ExternalSnafu)?;
let time_index = find_time_index(&create.constraints)?;
let table_options = HashMap::from(
&TableOptions::try_from(&to_lowercase_options_map(&create.options))
- .context(error::UnrecognizedTableOptionSnafu)?,
+ .context(UnrecognizedTableOptionSnafu)?,
);
let expr = CreateTableExpr {
catalog_name,
@@ -201,7 +203,7 @@ fn find_primary_keys(
Ok(primary_keys)
}
-pub fn find_time_index(constraints: &[TableConstraint]) -> crate::error::Result<String> {
+pub fn find_time_index(constraints: &[TableConstraint]) -> Result<String> {
let time_index = constraints
.iter()
.filter_map(|constraint| match constraint {
@@ -229,10 +231,7 @@ pub fn find_time_index(constraints: &[TableConstraint]) -> crate::error::Result<
Ok(time_index.first().unwrap().to_string())
}
-fn columns_to_expr(
- column_defs: &[ColumnDef],
- time_index: &str,
-) -> crate::error::Result<Vec<api::v1::ColumnDef>> {
+fn columns_to_expr(column_defs: &[ColumnDef], time_index: &str) -> Result<Vec<api::v1::ColumnDef>> {
let column_schemas = column_defs
.iter()
.map(|c| column_def_to_schema(c, c.name.to_string() == time_index).context(ParseSqlSnafu))
@@ -286,7 +285,7 @@ pub(crate) fn to_alter_expr(
let kind = match alter_table.alter_operation() {
AlterTableOperation::AddConstraint(_) => {
- return error::NotSupportedSnafu {
+ return NotSupportedSnafu {
feat: "ADD CONSTRAINT",
}
.fail();
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 00cbe4e0be0d..44cbf6e98b7f 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -29,7 +29,9 @@ use api::v1::alter_expr::Kind;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::meta::Role;
-use api::v1::{AddColumns, AlterExpr, Column, DdlRequest, InsertRequest, InsertRequests};
+use api::v1::{
+ AddColumns, AlterExpr, Column, DdlRequest, InsertRequest, InsertRequests, RowInsertRequests,
+};
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use catalog::remote::CachedMetaKvBackend;
@@ -76,6 +78,7 @@ use sql::parser::ParserContext;
use sql::statements::copy::CopyTable;
use sql::statements::statement::Statement;
use sqlparser::ast::ObjectName;
+use table::engine::TableReference;
use crate::catalog::FrontendCatalogManager;
use crate::error::{
@@ -83,12 +86,13 @@ use crate::error::{
InvalidInsertRequestSnafu, MissingMetasrvOptsSnafu, ParseSqlSnafu, PermissionSnafu,
PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
};
-use crate::expr_factory::{CreateExprFactoryRef, DefaultCreateExprFactory};
+use crate::expr_factory::CreateExprFactory;
use crate::frontend::FrontendOptions;
use crate::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use crate::heartbeat::HeartbeatTask;
use crate::instance::standalone::StandaloneGrpcQueryHandler;
use crate::metrics;
+use crate::row_inserter::RowInserter;
use crate::script::ScriptExecutor;
use crate::server::{start_server, ServerHandlers, Services};
use crate::statement::StatementExecutor;
@@ -120,16 +124,13 @@ pub struct Instance {
statement_executor: Arc<StatementExecutor>,
query_engine: QueryEngineRef,
grpc_query_handler: GrpcQueryHandlerRef<Error>,
-
- create_expr_factory: CreateExprFactoryRef,
-
+ create_expr_factory: CreateExprFactory,
/// plugins: this map holds extensions to customize query or auth
/// behaviours.
plugins: Arc<Plugins>,
-
servers: Arc<ServerHandlers>,
-
heartbeat_task: Option<HeartbeatTask>,
+ row_inserter: Arc<RowInserter>,
}
impl Instance {
@@ -209,16 +210,26 @@ impl Instance {
common_telemetry::init_node_id(opts.node_id.clone());
+ let create_expr_factory = CreateExprFactory;
+
+ let row_inserter = Arc::new(RowInserter::new(
+ MITO_ENGINE.to_string(),
+ catalog_manager.clone(),
+ create_expr_factory,
+ dist_instance.clone(),
+ ));
+
Ok(Instance {
catalog_manager,
script_executor,
- create_expr_factory: Arc::new(DefaultCreateExprFactory),
+ create_expr_factory,
statement_executor,
query_engine,
grpc_query_handler: dist_instance,
plugins: plugins.clone(),
servers: Arc::new(HashMap::new()),
heartbeat_task,
+ row_inserter,
})
}
@@ -271,16 +282,27 @@ impl Instance {
dn_instance.clone(),
));
+ let create_expr_factory = CreateExprFactory;
+ let grpc_query_handler = StandaloneGrpcQueryHandler::arc(dn_instance.clone());
+
+ let row_inserter = Arc::new(RowInserter::new(
+ MITO_ENGINE.to_string(),
+ catalog_manager.clone(),
+ create_expr_factory,
+ grpc_query_handler.clone(),
+ ));
+
Ok(Instance {
catalog_manager: catalog_manager.clone(),
script_executor,
- create_expr_factory: Arc::new(DefaultCreateExprFactory),
+ create_expr_factory,
statement_executor,
query_engine,
- grpc_query_handler: StandaloneGrpcQueryHandler::arc(dn_instance.clone()),
+ grpc_query_handler,
plugins: Default::default(),
servers: Arc::new(HashMap::new()),
heartbeat_task: None,
+ row_inserter,
})
}
@@ -295,6 +317,15 @@ impl Instance {
&self.catalog_manager
}
+    // Handle batch inserts in row format
+ pub async fn handle_row_inserts(
+ &self,
+ requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.row_inserter.handle_inserts(requests, ctx).await
+ }
+
/// Handle batch inserts
pub async fn handle_inserts(
&self,
@@ -379,10 +410,10 @@ impl Instance {
let schema_name = ctx.current_schema();
// Create table automatically, build schema from data.
- let create_expr = self
- .create_expr_factory
- .create_expr_by_columns(catalog_name, schema_name, table_name, columns, engine)
- .await?;
+ let table_name = TableReference::full(catalog_name, schema_name, table_name);
+ let create_expr =
+ self.create_expr_factory
+ .create_table_expr_by_columns(&table_name, columns, engine)?;
info!(
"Try to create table: {} automatically with request: {:?}",
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 4f8d39d0b817..bb59dec2ae62 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -666,7 +666,7 @@ impl GrpcQueryHandler for DistInstance {
match request {
Request::Inserts(requests) => self.handle_dist_insert(requests, ctx).await,
Request::RowInserts(_) | Request::RowDeletes(_) => NotSupportedSnafu {
- feat: "row insert/delete",
+ feat: "row inserts/deletes",
}
.fail(),
Request::Deletes(requests) => self.handle_dist_delete(requests, ctx).await,
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 9a3b1c06b1cb..e888cff1dba9 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -44,9 +44,10 @@ impl GrpcQueryHandler for Instance {
let output = match request {
Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
- Request::RowInserts(_) | Request::RowDeletes(_) => {
+ Request::RowInserts(requests) => self.handle_row_inserts(requests, ctx.clone()).await?,
+ Request::RowDeletes(_) => {
return NotSupportedSnafu {
- feat: "row insert/delete",
+ feat: "row deletes",
}
.fail();
}
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index d85b6a0f5ef4..ed2088ae9642 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -27,7 +27,7 @@ use crate::instance::Instance;
impl InfluxdbLineProtocolHandler for Instance {
async fn exec(
&self,
- request: &InfluxdbRequest,
+ request: InfluxdbRequest,
ctx: QueryContextRef,
) -> servers::error::Result<()> {
self.plugins
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 1c09176e5c84..ab76e8d3a1d9 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -22,6 +22,7 @@ pub mod frontend;
pub mod heartbeat;
pub mod instance;
pub(crate) mod metrics;
+mod row_inserter;
mod script;
mod server;
pub mod service_config;
diff --git a/src/frontend/src/row_inserter.rs b/src/frontend/src/row_inserter.rs
new file mode 100644
index 000000000000..d83af0bd65d9
--- /dev/null
+++ b/src/frontend/src/row_inserter.rs
@@ -0,0 +1,172 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::alter_expr::Kind;
+use api::v1::ddl_request::Expr;
+use api::v1::greptime_request::Request;
+use api::v1::{AlterExpr, ColumnSchema, DdlRequest, Row, RowInsertRequest, RowInsertRequests};
+use catalog::CatalogManagerRef;
+use common_grpc_expr::util::{extract_new_columns, ColumnExpr};
+use common_query::Output;
+use common_telemetry::info;
+use servers::query_handler::grpc::GrpcQueryHandlerRef;
+use session::context::QueryContextRef;
+use snafu::{ensure, OptionExt, ResultExt};
+use table::engine::TableReference;
+use table::TableRef;
+
+use crate::error::{CatalogSnafu, EmptyDataSnafu, Error, FindNewColumnsOnInsertionSnafu, Result};
+use crate::expr_factory::CreateExprFactory;
+
+pub struct RowInserter {
+ engine_name: String,
+ catalog_manager: CatalogManagerRef,
+ create_expr_factory: CreateExprFactory,
+ grpc_query_handler: GrpcQueryHandlerRef<Error>,
+}
+
+impl RowInserter {
+ pub fn new(
+ engine_name: String,
+ catalog_manager: CatalogManagerRef,
+ create_expr_factory: CreateExprFactory,
+ grpc_query_handler: GrpcQueryHandlerRef<Error>,
+ ) -> Self {
+ Self {
+ engine_name,
+ catalog_manager,
+ create_expr_factory,
+ grpc_query_handler,
+ }
+ }
+
+ pub async fn handle_inserts(
+ &self,
+ requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.create_or_alter_tables_on_demand(&requests, ctx.clone())
+ .await?;
+ let query = Request::RowInserts(requests);
+ self.grpc_query_handler.do_query(query, ctx).await
+ }
+
+    // Check whether the tables already exist:
+    // - if a table does not exist, create it from the inferred CreateExpr
+    // - if a table exists, check whether the schema matches; if any new columns are found, alter the table with the inferred `AlterExpr`
+ async fn create_or_alter_tables_on_demand(
+ &self,
+ requests: &RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<()> {
+ let catalog_name = ctx.current_catalog();
+ let schema_name = ctx.current_schema();
+
+ // TODO(jeremy): create and alter in batch?
+ for req in &requests.inserts {
+ let table_name = &req.table_name;
+ let table = self
+ .catalog_manager
+ .table(catalog_name, schema_name, table_name)
+ .await
+ .context(CatalogSnafu)?;
+ match table {
+ Some(table) => {
+ self.alter_table_on_demand(catalog_name, schema_name, table, req, ctx.clone())
+ .await?
+ }
+ None => {
+ let table_name = TableReference::full(catalog_name, schema_name, table_name);
+ self.create_table(&table_name, req, ctx.clone()).await?
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ async fn create_table(
+ &self,
+ table_name: &TableReference<'_>,
+ req: &RowInsertRequest,
+ ctx: QueryContextRef,
+ ) -> Result<()> {
+ let (column_schemas, _) = extract_schema_and_rows(req)?;
+ let create_table_expr = self
+ .create_expr_factory
+ .create_table_expr_by_column_schemas(table_name, column_schemas, &self.engine_name)?;
+
+ let req = Request::Ddl(DdlRequest {
+ expr: Some(Expr::CreateTable(create_table_expr)),
+ });
+ self.grpc_query_handler.do_query(req, ctx).await?;
+
+ Ok(())
+ }
+
+ async fn alter_table_on_demand(
+ &self,
+ catalog_name: &str,
+ schema_name: &str,
+ table: TableRef,
+ req: &RowInsertRequest,
+ ctx: QueryContextRef,
+ ) -> Result<()> {
+ let (column_schemas, _) = extract_schema_and_rows(req)?;
+ let column_exprs = ColumnExpr::from_column_schemas(column_schemas);
+ let add_columns = extract_new_columns(&table.schema(), column_exprs)
+ .context(FindNewColumnsOnInsertionSnafu)?;
+ let Some(add_columns) = add_columns else {
+ return Ok(());
+ };
+ let table_name = table.table_info().name.clone();
+
+ info!(
+ "Adding new columns: {:?} to table: {}.{}.{}",
+ add_columns, catalog_name, schema_name, table_name
+ );
+
+ let alter_table_expr = AlterExpr {
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ table_name,
+ kind: Some(Kind::AddColumns(add_columns)),
+ ..Default::default()
+ };
+
+ let req = Request::Ddl(DdlRequest {
+ expr: Some(Expr::Alter(alter_table_expr)),
+ });
+ self.grpc_query_handler.do_query(req, ctx).await?;
+
+ Ok(())
+ }
+}
+
+fn extract_schema_and_rows(req: &RowInsertRequest) -> Result<(&[ColumnSchema], &[Row])> {
+ let rows = req.rows.as_ref().with_context(|| EmptyDataSnafu {
+ msg: format!("insert to table: {:?}", &req.table_name),
+ })?;
+ let schema = &rows.schema;
+ let rows = &rows.rows;
+
+ ensure!(
+ !rows.is_empty(),
+ EmptyDataSnafu {
+ msg: format!("insert to table: {:?}", &req.table_name),
+ }
+ );
+
+ Ok((schema, rows))
+}
diff --git a/src/meta-srv/src/service/store/etcd.rs b/src/meta-srv/src/service/store/etcd.rs
index 21aa4cf7022f..a9d8f70713e6 100644
--- a/src/meta-srv/src/service/store/etcd.rs
+++ b/src/meta-srv/src/service/store/etcd.rs
@@ -391,7 +391,7 @@ struct Get {
}
impl TryFrom<RangeRequest> for Get {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: RangeRequest) -> Result<Self> {
let RangeRequest {
@@ -428,7 +428,7 @@ struct Put {
}
impl TryFrom<PutRequest> for Put {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: PutRequest) -> Result<Self> {
let PutRequest {
@@ -456,7 +456,7 @@ struct BatchGet {
}
impl TryFrom<BatchGetRequest> for BatchGet {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: BatchGetRequest) -> Result<Self> {
let BatchGetRequest { keys } = req;
@@ -476,7 +476,7 @@ struct BatchPut {
}
impl TryFrom<BatchPutRequest> for BatchPut {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: BatchPutRequest) -> Result<Self> {
let BatchPutRequest { kvs, prev_kv } = req;
@@ -499,7 +499,7 @@ struct BatchDelete {
}
impl TryFrom<BatchDeleteRequest> for BatchDelete {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: BatchDeleteRequest) -> Result<Self> {
let BatchDeleteRequest { keys, prev_kv } = req;
@@ -524,7 +524,7 @@ struct CompareAndPut {
}
impl TryFrom<CompareAndPutRequest> for CompareAndPut {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: CompareAndPutRequest) -> Result<Self> {
let CompareAndPutRequest { key, expect, value } = req;
@@ -544,7 +544,7 @@ struct Delete {
}
impl TryFrom<DeleteRangeRequest> for Delete {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: DeleteRangeRequest) -> Result<Self> {
let DeleteRangeRequest {
@@ -577,7 +577,7 @@ struct MoveValue {
}
impl TryFrom<MoveValueRequest> for MoveValue {
- type Error = error::Error;
+ type Error = Error;
fn try_from(req: MoveValueRequest) -> Result<Self> {
let MoveValueRequest { from_key, to_key } = req;
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 52c3842e6d60..e1bb62545304 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -328,6 +328,21 @@ pub enum Error {
actual: opensrv_mysql::ColumnType,
location: Location,
},
+
+ #[snafu(display(
+ "Column: {}, {} incompatible, expected: {}, actual: {}",
+ column_name,
+ datatype,
+ expected,
+ actual
+ ))]
+ IncompatibleSchema {
+ column_name: String,
+ datatype: String,
+ expected: i32,
+ actual: i32,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -371,7 +386,8 @@ impl ErrorExt for Error {
| InvalidPrepareStatement { .. }
| DataFrame { .. }
| PreparedStmtTypeMismatch { .. }
- | TimePrecision { .. } => StatusCode::InvalidArguments,
+ | TimePrecision { .. }
+ | IncompatibleSchema { .. } => StatusCode::InvalidArguments,
InfluxdbLinesWrite { source, .. }
| PromSeriesWrite { source, .. }
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
index 6879ca42d579..9e3bec8558e4 100644
--- a/src/servers/src/http/influxdb.rs
+++ b/src/servers/src/http/influxdb.rs
@@ -97,7 +97,7 @@ pub async fn influxdb_write(
let request = InfluxdbRequest { precision, lines };
- handler.exec(&request, ctx).await?;
+ handler.exec(request, ctx).await?;
Ok((StatusCode::NO_CONTENT, ()))
}
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 4b302c1610c1..82dd83239354 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -14,14 +14,22 @@
use std::collections::HashMap;
-use api::v1::{InsertRequest as GrpcInsertRequest, InsertRequests};
+use api::v1::value::ValueData;
+use api::v1::{
+ ColumnDataType, ColumnSchema, InsertRequest as GrpcInsertRequest, InsertRequests, Row,
+ RowInsertRequest, RowInsertRequests, Rows, SemanticType, Value,
+};
+use common_grpc::writer;
use common_grpc::writer::{LinesWriter, Precision};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
-use influxdb_line_protocol::{parse_lines, FieldValue};
-use snafu::{OptionExt, ResultExt};
+use influxdb_line_protocol::{parse_lines, FieldSet, FieldValue, TagSet};
+use snafu::{ensure, OptionExt, ResultExt};
-use crate::error::{Error, InfluxdbLineProtocolSnafu, InfluxdbLinesWriteSnafu, TimePrecisionSnafu};
+use crate::error::{
+ Error, IncompatibleSchemaSnafu, InfluxdbLineProtocolSnafu, InfluxdbLinesWriteSnafu,
+ TimePrecisionSnafu,
+};
pub const INFLUXDB_TIMESTAMP_COLUMN_NAME: &str = "ts";
pub const DEFAULT_TIME_PRECISION: Precision = Precision::Nanosecond;
@@ -34,10 +42,10 @@ pub struct InfluxdbRequest {
type TableName = String;
-impl TryFrom<&InfluxdbRequest> for InsertRequests {
+impl TryFrom<InfluxdbRequest> for InsertRequests {
type Error = Error;
- fn try_from(value: &InfluxdbRequest) -> Result<Self, Self::Error> {
+ fn try_from(value: InfluxdbRequest) -> Result<Self, Self::Error> {
let mut writers: HashMap<TableName, LinesWriter> = HashMap::new();
let lines = parse_lines(&value.lines)
.collect::<influxdb_line_protocol::Result<Vec<_>>>()
@@ -92,24 +100,14 @@ impl TryFrom<&InfluxdbRequest> for InsertRequests {
}
if let Some(timestamp) = line.timestamp {
- let precision = unwarp_or_default_precision(value.precision);
+ let precision = unwrap_or_default_precision(value.precision);
writer
.write_ts(INFLUXDB_TIMESTAMP_COLUMN_NAME, (timestamp, precision))
.context(InfluxdbLinesWriteSnafu)?;
} else {
- let precision = unwarp_or_default_precision(value.precision);
+ let precision = unwrap_or_default_precision(value.precision);
let timestamp = Timestamp::current_millis();
- let unit = match precision {
- Precision::Second => TimeUnit::Second,
- Precision::Millisecond => TimeUnit::Millisecond,
- Precision::Microsecond => TimeUnit::Microsecond,
- Precision::Nanosecond => TimeUnit::Nanosecond,
- _ => {
- return Err(Error::NotSupported {
- feat: format!("convert {precision} into TimeUnit"),
- })
- }
- };
+ let unit = get_time_unit(precision)?;
let timestamp = timestamp
.convert_to(unit)
.with_context(|| TimePrecisionSnafu {
@@ -141,7 +139,228 @@ impl TryFrom<&InfluxdbRequest> for InsertRequests {
}
}
-fn unwarp_or_default_precision(precision: Option<Precision>) -> Precision {
+impl TryFrom<InfluxdbRequest> for RowInsertRequests {
+ type Error = Error;
+
+ fn try_from(value: InfluxdbRequest) -> Result<Self, Self::Error> {
+ let lines = parse_lines(&value.lines)
+ .collect::<influxdb_line_protocol::Result<Vec<_>>>()
+ .context(InfluxdbLineProtocolSnafu)?;
+
+ struct TableData<'a> {
+ schema: Vec<ColumnSchema>,
+ rows: Vec<Row>,
+ column_indexes: HashMap<&'a str, usize>,
+ }
+
+ let mut table_data_map = HashMap::new();
+
+ for line in &lines {
+ let table_name = line.series.measurement.as_str();
+ let tags = &line.series.tag_set;
+ let fields = &line.field_set;
+ let ts = line.timestamp;
+ // tags.len + fields.len + timestamp(+1)
+ let num_columns = tags.as_ref().map(|x| x.len()).unwrap_or(0) + fields.len() + 1;
+
+ let TableData {
+ schema,
+ rows,
+ column_indexes,
+ } = table_data_map
+ .entry(table_name)
+ .or_insert_with(|| TableData {
+ schema: Vec::with_capacity(num_columns),
+ rows: Vec::new(),
+ column_indexes: HashMap::with_capacity(num_columns),
+ });
+
+ let mut one_row = vec![Value { value_data: None }; schema.len()];
+
+ // tags
+ parse_tags(tags, column_indexes, schema, &mut one_row)?;
+ // fields
+ parse_fields(fields, column_indexes, schema, &mut one_row)?;
+ // timestamp
+ parse_ts(ts, value.precision, column_indexes, schema, &mut one_row)?;
+
+ rows.push(Row { values: one_row });
+ }
+
+ let inserts = table_data_map
+ .into_iter()
+ .map(
+ |(
+ table_name,
+ TableData {
+ schema, mut rows, ..
+ },
+ )| {
+ let num_columns = schema.len();
+ for row in rows.iter_mut() {
+ if num_columns > row.values.len() {
+ row.values.resize(num_columns, Value { value_data: None });
+ }
+ }
+
+ RowInsertRequest {
+ table_name: table_name.to_string(),
+ rows: Some(Rows { schema, rows }),
+ ..Default::default()
+ }
+ },
+ )
+ .collect::<Vec<_>>();
+
+ Ok(RowInsertRequests { inserts })
+ }
+}
+
+fn parse_tags<'a>(
+ tags: &'a Option<TagSet>,
+ column_indexes: &mut HashMap<&'a str, usize>,
+ schema: &mut Vec<ColumnSchema>,
+ one_row: &mut Vec<Value>,
+) -> Result<(), Error> {
+ let Some(tags) = tags else {
+ return Ok(());
+ };
+
+ for (k, v) in tags {
+ let index = column_indexes.entry(k.as_str()).or_insert(schema.len());
+ if *index == schema.len() {
+ schema.push(ColumnSchema {
+ column_name: k.to_string(),
+ datatype: ColumnDataType::String as i32,
+ semantic_type: SemanticType::Tag as i32,
+ });
+ one_row.push(to_value(ValueData::StringValue(v.to_string())));
+ } else {
+ check_schema(ColumnDataType::String, SemanticType::Tag, &schema[*index])?;
+ one_row[*index].value_data = Some(ValueData::StringValue(v.to_string()));
+ }
+ }
+
+ Ok(())
+}
+
+fn parse_fields<'a>(
+ fields: &'a FieldSet,
+ column_indexes: &mut HashMap<&'a str, usize>,
+ schema: &mut Vec<ColumnSchema>,
+ one_row: &mut Vec<Value>,
+) -> Result<(), Error> {
+ for (k, v) in fields {
+ let index = column_indexes.entry(k.as_str()).or_insert(schema.len());
+ let (datatype, value) = match v {
+ FieldValue::I64(v) => (ColumnDataType::Int64, ValueData::I64Value(*v)),
+ FieldValue::U64(v) => (ColumnDataType::Uint64, ValueData::U64Value(*v)),
+ FieldValue::F64(v) => (ColumnDataType::Float64, ValueData::F64Value(*v)),
+ FieldValue::String(v) => (
+ ColumnDataType::String,
+ ValueData::StringValue(v.to_string()),
+ ),
+ FieldValue::Boolean(v) => (ColumnDataType::Boolean, ValueData::BoolValue(*v)),
+ };
+
+ if *index == schema.len() {
+ schema.push(ColumnSchema {
+ column_name: k.to_string(),
+ datatype: datatype as i32,
+ semantic_type: SemanticType::Field as i32,
+ });
+ one_row.push(to_value(value));
+ } else {
+ check_schema(datatype, SemanticType::Field, &schema[*index])?;
+ one_row[*index].value_data = Some(value);
+ }
+ }
+
+ Ok(())
+}
+
+fn parse_ts(
+ ts: Option<i64>,
+ precision: Option<Precision>,
+ column_indexes: &mut HashMap<&str, usize>,
+ schema: &mut Vec<ColumnSchema>,
+ one_row: &mut Vec<Value>,
+) -> Result<(), Error> {
+ let precision = unwrap_or_default_precision(precision);
+ let ts = match ts {
+ Some(timestamp) => writer::to_ms_ts(precision, timestamp),
+ None => {
+ let timestamp = Timestamp::current_millis();
+ let unit = get_time_unit(precision)?;
+ let timestamp = timestamp
+ .convert_to(unit)
+ .with_context(|| TimePrecisionSnafu {
+ name: precision.to_string(),
+ })?;
+ writer::to_ms_ts(precision, timestamp.into())
+ }
+ };
+
+ let column_name = INFLUXDB_TIMESTAMP_COLUMN_NAME;
+ let index = column_indexes.entry(column_name).or_insert(schema.len());
+ if *index == schema.len() {
+ schema.push(ColumnSchema {
+ column_name: column_name.to_string(),
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ semantic_type: SemanticType::Timestamp as i32,
+ });
+ one_row.push(to_value(ValueData::TsMillisecondValue(ts)))
+ } else {
+ check_schema(
+ ColumnDataType::TimestampMillisecond,
+ SemanticType::Timestamp,
+ &schema[*index],
+ )?;
+ one_row[*index].value_data = Some(ValueData::TsMillisecondValue(ts));
+ }
+
+ Ok(())
+}
+
+#[inline]
+fn check_schema(
+ datatype: ColumnDataType,
+ semantic_type: SemanticType,
+ schema: &ColumnSchema,
+) -> Result<(), Error> {
+ ensure!(
+ schema.datatype == datatype as i32,
+ IncompatibleSchemaSnafu {
+ column_name: &schema.column_name,
+ datatype: "datatype",
+ expected: schema.datatype,
+ actual: datatype as i32,
+ }
+ );
+
+ ensure!(
+ schema.semantic_type == semantic_type as i32,
+ IncompatibleSchemaSnafu {
+ column_name: &schema.column_name,
+ datatype: "semantic_type",
+ expected: schema.semantic_type,
+ actual: semantic_type as i32,
+ }
+ );
+
+ Ok(())
+}
+
+// TODO(jeremy): impl From<ValueData> for Value
+#[inline]
+fn to_value(value: ValueData) -> Value {
+ Value {
+ value_data: Some(value),
+ }
+}
+
+#[inline]
+fn unwrap_or_default_precision(precision: Option<Precision>) -> Precision {
if let Some(val) = precision {
val
} else {
@@ -149,6 +368,21 @@ fn unwarp_or_default_precision(precision: Option<Precision>) -> Precision {
}
}
+#[inline]
+fn get_time_unit(precision: Precision) -> Result<TimeUnit, Error> {
+ Ok(match precision {
+ Precision::Second => TimeUnit::Second,
+ Precision::Millisecond => TimeUnit::Millisecond,
+ Precision::Microsecond => TimeUnit::Microsecond,
+ Precision::Nanosecond => TimeUnit::Nanosecond,
+ _ => {
+ return Err(Error::NotSupported {
+ feat: format!("convert {precision} into TimeUnit"),
+ })
+ }
+ })
+}
+
#[cfg(test)]
mod tests {
use api::v1::column::Values;
@@ -166,7 +400,7 @@ monitor1,host=host2 memory=1027 1663840496400340001
monitor2,host=host3 cpu=66.5 1663840496100023102
monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
- let influxdb_req = &InfluxdbRequest {
+ let influxdb_req = InfluxdbRequest {
precision: None,
lines: lines.to_string(),
};
@@ -306,4 +540,199 @@ monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
assert_eq!(b, bitvec.get(idx).unwrap())
}
}
+ #[test]
+ fn test_convert_influxdb_lines_to_rows() {
+ let lines = r"
+monitor1,host=host1 cpu=66.6,memory=1024 1663840496100023100
+monitor1,host=host2 memory=1027 1663840496400340001
+monitor2,host=host3 cpu=66.5 1663840496100023102
+monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
+
+ let influxdb_req = InfluxdbRequest {
+ precision: None,
+ lines: lines.to_string(),
+ };
+
+ let requests: RowInsertRequests = influxdb_req.try_into().unwrap();
+ assert_eq!(2, requests.inserts.len());
+
+ for request in requests.inserts {
+ match &request.table_name[..] {
+ "monitor1" => assert_monitor1_rows(&request.rows),
+ "monitor2" => assert_monitor2_rows(&request.rows),
+ _ => panic!(),
+ }
+ }
+ }
+
+ fn assert_monitor1_rows(rows: &Option<Rows>) {
+ let rows = rows.as_ref().unwrap();
+ let schema = &rows.schema;
+ let rows = &rows.rows;
+ assert_eq!(4, schema.len());
+ assert_eq!(2, rows.len());
+
+ for (i, column_schema) in schema.iter().enumerate() {
+ match &column_schema.column_name[..] {
+ "host" => {
+ assert_eq!(ColumnDataType::String as i32, column_schema.datatype);
+ assert_eq!(SemanticType::Tag as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref().unwrap();
+ match j {
+ 0 => assert_eq!("host1", extract_string_value(v)),
+ 1 => assert_eq!("host2", extract_string_value(v)),
+ _ => panic!(),
+ }
+ }
+ }
+ "cpu" => {
+ assert_eq!(ColumnDataType::Float64 as i32, column_schema.datatype);
+ assert_eq!(SemanticType::Field as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref();
+ match j {
+ 0 => assert_eq!(66.6f64, extract_f64_value(v.as_ref().unwrap())),
+ 1 => assert_eq!(None, v),
+ _ => panic!(),
+ }
+ }
+ }
+ "memory" => {
+ assert_eq!(ColumnDataType::Float64 as i32, column_schema.datatype);
+ assert_eq!(SemanticType::Field as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref();
+ match j {
+ 0 => assert_eq!(1024f64, extract_f64_value(v.as_ref().unwrap())),
+ 1 => assert_eq!(1027f64, extract_f64_value(v.as_ref().unwrap())),
+ _ => panic!(),
+ }
+ }
+ }
+ "ts" => {
+ assert_eq!(
+ ColumnDataType::TimestampMillisecond as i32,
+ column_schema.datatype
+ );
+ assert_eq!(SemanticType::Timestamp as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref();
+ match j {
+ 0 => assert_eq!(
+ 1663840496100023100 / 1_000_000,
+ extract_ts_millis_value(v.as_ref().unwrap())
+ ),
+ 1 => assert_eq!(
+ 1663840496400340001 / 1_000_000,
+ extract_ts_millis_value(v.as_ref().unwrap())
+ ),
+ _ => panic!(),
+ }
+ }
+ }
+ _ => panic!(),
+ }
+ }
+ }
+
+ fn assert_monitor2_rows(rows: &Option<Rows>) {
+ let rows = rows.as_ref().unwrap();
+ let schema = &rows.schema;
+ let rows = &rows.rows;
+ assert_eq!(4, schema.len());
+ assert_eq!(2, rows.len());
+
+ for (i, column_schema) in schema.iter().enumerate() {
+ match &column_schema.column_name[..] {
+ "host" => {
+ assert_eq!(ColumnDataType::String as i32, column_schema.datatype);
+ assert_eq!(SemanticType::Tag as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref().unwrap();
+ match j {
+ 0 => assert_eq!("host3", extract_string_value(v)),
+ 1 => assert_eq!("host4", extract_string_value(v)),
+ _ => panic!(),
+ }
+ }
+ }
+ "cpu" => {
+ assert_eq!(ColumnDataType::Float64 as i32, column_schema.datatype);
+ assert_eq!(SemanticType::Field as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref();
+ match j {
+ 0 => assert_eq!(66.5f64, extract_f64_value(v.as_ref().unwrap())),
+ 1 => assert_eq!(66.3f64, extract_f64_value(v.as_ref().unwrap())),
+ _ => panic!(),
+ }
+ }
+ }
+ "memory" => {
+ assert_eq!(ColumnDataType::Float64 as i32, column_schema.datatype);
+ assert_eq!(SemanticType::Field as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref();
+ match j {
+ 0 => assert_eq!(None, v),
+ 1 => assert_eq!(1029f64, extract_f64_value(v.as_ref().unwrap())),
+ _ => panic!(),
+ }
+ }
+ }
+ "ts" => {
+ assert_eq!(
+ ColumnDataType::TimestampMillisecond as i32,
+ column_schema.datatype
+ );
+ assert_eq!(SemanticType::Timestamp as i32, column_schema.semantic_type);
+
+ for (j, row) in rows.iter().enumerate() {
+ let v = row.values[i].value_data.as_ref();
+ match j {
+ 0 => assert_eq!(
+ 1663840496100023102 / 1_000_000,
+ extract_ts_millis_value(v.as_ref().unwrap())
+ ),
+ 1 => assert_eq!(
+ 1663840496400340003 / 1_000_000,
+ extract_ts_millis_value(v.as_ref().unwrap())
+ ),
+ _ => panic!(),
+ }
+ }
+ }
+ _ => panic!(),
+ }
+ }
+ }
+
+ fn extract_string_value(value: &ValueData) -> &str {
+ match value {
+ ValueData::StringValue(v) => v,
+ _ => panic!(),
+ }
+ }
+
+ fn extract_f64_value(value: &ValueData) -> f64 {
+ match value {
+ ValueData::F64Value(v) => *v,
+ _ => panic!(),
+ }
+ }
+
+ fn extract_ts_millis_value(value: &ValueData) -> i64 {
+ match value {
+ ValueData::TsMillisecondValue(v) => *v,
+ _ => panic!(),
+ }
+ }
}
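For reference, a single line such as `monitor1,host=host1 cpu=66.6 1663840496100023100` converted by the code above yields roughly the request sketched below. This is a hand-written illustration using the same `api::v1` types, not output captured from the converter; the timestamp is truncated to milliseconds, matching the test assertions.

use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, RowInsertRequest, Rows, SemanticType, Value};

// Sketch of the RowInsertRequest produced for one InfluxDB line:
// one tag column, one field column, and the millisecond timestamp column.
fn example_row_insert_request() -> RowInsertRequest {
    let schema = vec![
        ColumnSchema {
            column_name: "host".to_string(),
            datatype: ColumnDataType::String as i32,
            semantic_type: SemanticType::Tag as i32,
        },
        ColumnSchema {
            column_name: "cpu".to_string(),
            datatype: ColumnDataType::Float64 as i32,
            semantic_type: SemanticType::Field as i32,
        },
        ColumnSchema {
            column_name: "ts".to_string(),
            datatype: ColumnDataType::TimestampMillisecond as i32,
            semantic_type: SemanticType::Timestamp as i32,
        },
    ];
    let row = Row {
        values: vec![
            Value { value_data: Some(ValueData::StringValue("host1".to_string())) },
            Value { value_data: Some(ValueData::F64Value(66.6)) },
            // 1663840496100023100 ns truncated to ms, as asserted in the tests above.
            Value { value_data: Some(ValueData::TsMillisecondValue(1663840496100)) },
        ],
    };
    RowInsertRequest {
        table_name: "monitor1".to_string(),
        rows: Some(Rows { schema, rows: vec![row] }),
        ..Default::default()
    }
}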
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index 14172bce0baf..796303a88841 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -62,7 +62,7 @@ pub trait ScriptHandler {
pub trait InfluxdbLineProtocolHandler {
/// A successful request will not return a response.
/// Only on error will the socket return a line of data.
- async fn exec(&self, request: &InfluxdbRequest, ctx: QueryContextRef) -> Result<()>;
+ async fn exec(&self, request: InfluxdbRequest, ctx: QueryContextRef) -> Result<()>;
}
#[async_trait]
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index ca26bbe8d957..3e2db6311826 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -53,7 +53,7 @@ impl GrpcQueryHandler for DummyInstance {
#[async_trait]
impl InfluxdbLineProtocolHandler for DummyInstance {
- async fn exec(&self, request: &InfluxdbRequest, ctx: QueryContextRef) -> Result<()> {
+ async fn exec(&self, request: InfluxdbRequest, ctx: QueryContextRef) -> Result<()> {
let requests: InsertRequests = request.try_into()?;
for expr in requests.inserts {
let _ = self
diff --git a/tests-integration/src/influxdb.rs b/tests-integration/src/influxdb.rs
index 59d81457d795..aa3f319e663b 100644
--- a/tests-integration/src/influxdb.rs
+++ b/tests-integration/src/influxdb.rs
@@ -64,7 +64,7 @@ monitor1,host=host2 memory=1027";
precision: None,
lines: lines.to_string(),
};
- assert!(instance.exec(&request, QueryContext::arc()).await.is_ok());
+ assert!(instance.exec(request, QueryContext::arc()).await.is_ok());
let mut output = instance
.do_query(
@@ -93,7 +93,7 @@ monitor1,host=host2 memory=1027 1663840496400340001";
precision: None,
lines: lines.to_string(),
};
- instance.exec(&request, QueryContext::arc()).await.unwrap();
+ instance.exec(request, QueryContext::arc()).await.unwrap();
let mut output = instance
.do_query(
| feat | row write protocol (#2189) |
| 03057cab6c1cb28c84a722b96ca79c9d084b3b31 | 2023-06-27 11:34:04 | shuiyisong | feat: physical plan wrapper (#1837) | false |
diff --git a/Cargo.lock b/Cargo.lock
index c75aabea7190..fe703d366e21 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5209,7 +5209,7 @@ dependencies = [
[[package]]
name = "meter-core"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=f0798c4c648d89f51abe63e870919c75dd463199#f0798c4c648d89f51abe63e870919c75dd463199"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=abbd357c1e193cd270ea65ee7652334a150b628f#abbd357c1e193cd270ea65ee7652334a150b628f"
dependencies = [
"anymap",
"once_cell",
@@ -5219,7 +5219,7 @@ dependencies = [
[[package]]
name = "meter-macros"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=f0798c4c648d89f51abe63e870919c75dd463199#f0798c4c648d89f51abe63e870919c75dd463199"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=abbd357c1e193cd270ea65ee7652334a150b628f#abbd357c1e193cd270ea65ee7652334a150b628f"
dependencies = [
"meter-core",
]
diff --git a/Cargo.toml b/Cargo.toml
index 4a5d98453814..0b09ee555e5b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -88,11 +88,11 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
metrics = "0.20"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "f0798c4c648d89f51abe63e870919c75dd463199" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "f0798c4c648d89f51abe63e870919c75dd463199"
+rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
[profile.release]
debug = true
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 6e4d5c645c39..56cea0dd64e5 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -19,6 +19,7 @@ use std::time::Instant;
use catalog::remote::CachedMetaKvBackend;
use client::client_manager::DatanodeClients;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_base::Plugins;
use common_error::prelude::ErrorExt;
use common_query::Output;
use common_recordbatch::RecordBatches;
@@ -266,13 +267,14 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
partition_manager,
datanode_clients,
));
+ let plugins: Arc<Plugins> = Default::default();
let state = Arc::new(QueryEngineState::new(
catalog_list,
false,
None,
None,
- Default::default(),
+ plugins.clone(),
));
- Ok(DatafusionQueryEngine::new(state))
+ Ok(DatafusionQueryEngine::new(state, plugins))
}
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 5e35cb21fbac..b3518d9627c8 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -170,7 +170,9 @@ impl StartCommand {
logging::info!("Datanode start command: {:#?}", self);
logging::info!("Datanode options: {:#?}", opts);
- let datanode = Datanode::new(opts).await.context(StartDatanodeSnafu)?;
+ let datanode = Datanode::new(opts, Default::default())
+ .await
+ .context(StartDatanodeSnafu)?;
Ok(Instance { datanode })
}
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 6e307aaad9d8..85a4beebec39 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -308,7 +308,7 @@ impl StartCommand {
fe_opts, dn_opts
);
- let datanode = Datanode::new(dn_opts.clone())
+ let datanode = Datanode::new(dn_opts.clone(), Default::default())
.await
.context(StartDatanodeSnafu)?;
diff --git a/src/common/query/src/physical_plan.rs b/src/common/query/src/physical_plan.rs
index 1c148020ac9b..a3e30e1ccf3f 100644
--- a/src/common/query/src/physical_plan.rs
+++ b/src/common/query/src/physical_plan.rs
@@ -150,7 +150,7 @@ impl PhysicalPlan for PhysicalPlanAdapter {
}
fn metrics(&self) -> Option<MetricsSet> {
- Some(self.metric.clone_inner())
+ self.df_plan.metrics()
}
}
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 59bf68557a58..6fa7eb4e7a90 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -14,9 +14,11 @@
//! Datanode configurations
+use std::sync::Arc;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
+use common_base::Plugins;
use common_error::prelude::BoxedError;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
@@ -386,8 +388,8 @@ pub struct Datanode {
}
impl Datanode {
- pub async fn new(opts: DatanodeOptions) -> Result<Datanode> {
- let (instance, heartbeat_task) = Instance::with_opts(&opts).await?;
+ pub async fn new(opts: DatanodeOptions, plugins: Arc<Plugins>) -> Result<Datanode> {
+ let (instance, heartbeat_task) = Instance::with_opts(&opts, plugins).await?;
let services = match opts.mode {
Mode::Distributed => Some(Services::try_new(instance.clone(), &opts).await?),
Mode::Standalone => None,
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 50d9cce35c3f..1b23e13b4462 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -22,6 +22,7 @@ use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_base::paths::{CLUSTER_DIR, WAL_DIR};
+use common_base::Plugins;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_error::prelude::BoxedError;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
@@ -84,7 +85,10 @@ pub struct Instance {
pub type InstanceRef = Arc<Instance>;
impl Instance {
- pub async fn with_opts(opts: &DatanodeOptions) -> Result<(InstanceRef, Option<HeartbeatTask>)> {
+ pub async fn with_opts(
+ opts: &DatanodeOptions,
+ plugins: Arc<Plugins>,
+ ) -> Result<(InstanceRef, Option<HeartbeatTask>)> {
let meta_client = match opts.mode {
Mode::Standalone => None,
Mode::Distributed => {
@@ -101,7 +105,7 @@ impl Instance {
let compaction_scheduler = create_compaction_scheduler(opts);
- Self::new(opts, meta_client, compaction_scheduler).await
+ Self::new(opts, meta_client, compaction_scheduler, plugins).await
}
fn build_heartbeat_task(
@@ -154,6 +158,7 @@ impl Instance {
opts: &DatanodeOptions,
meta_client: Option<Arc<MetaClient>>,
compaction_scheduler: CompactionSchedulerRef<RaftEngineLogStore>,
+ plugins: Arc<Plugins>,
) -> Result<(InstanceRef, Option<HeartbeatTask>)> {
let object_store = store::new_object_store(&opts.storage.store).await?;
let log_store = Arc::new(create_log_store(&opts.storage.store, &opts.wal).await?);
@@ -261,7 +266,13 @@ impl Instance {
};
catalog_manager.start().await.context(CatalogSnafu)?;
- let factory = QueryEngineFactory::new(catalog_manager.clone(), false);
+ let factory = QueryEngineFactory::new_with_plugins(
+ catalog_manager.clone(),
+ false,
+ None,
+ None,
+ plugins,
+ );
let query_engine = factory.query_engine();
let procedure_manager =
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index 36ebac5187a6..4114ae0e688c 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -38,7 +38,13 @@ impl Instance {
) -> Result<(InstanceRef, Option<HeartbeatTask>)> {
let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
- Instance::new(opts, Some(meta_client), compaction_scheduler).await
+ Instance::new(
+ opts,
+ Some(meta_client),
+ compaction_scheduler,
+ Default::default(),
+ )
+ .await
}
}
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 10066d411e46..4dd7c1f21bef 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -22,6 +22,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
+use common_base::Plugins;
use common_error::prelude::BoxedError;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::udf::create_udf;
@@ -53,6 +54,7 @@ use crate::executor::QueryExecutor;
use crate::logical_optimizer::LogicalOptimizer;
use crate::physical_optimizer::PhysicalOptimizer;
use crate::physical_planner::PhysicalPlanner;
+use crate::physical_wrapper::PhysicalPlanWrapperRef;
use crate::plan::LogicalPlan;
use crate::planner::{DfLogicalPlanner, LogicalPlanner};
use crate::query_engine::{DescribeResult, QueryEngineContext, QueryEngineState};
@@ -60,20 +62,31 @@ use crate::{metrics, QueryEngine};
pub struct DatafusionQueryEngine {
state: Arc<QueryEngineState>,
+ plugins: Arc<Plugins>,
}
impl DatafusionQueryEngine {
- pub fn new(state: Arc<QueryEngineState>) -> Self {
- Self { state }
+ pub fn new(state: Arc<QueryEngineState>, plugins: Arc<Plugins>) -> Self {
+ Self { state, plugins }
}
- async fn exec_query_plan(&self, plan: LogicalPlan) -> Result<Output> {
+ async fn exec_query_plan(
+ &self,
+ plan: LogicalPlan,
+ query_ctx: QueryContextRef,
+ ) -> Result<Output> {
let mut ctx = QueryEngineContext::new(self.state.session_state());
// `create_physical_plan` will optimize logical plan internally
let physical_plan = self.create_physical_plan(&mut ctx, &plan).await?;
let physical_plan = self.optimize_physical_plan(&mut ctx, physical_plan)?;
+ let physical_plan = if let Some(wrapper) = self.plugins.get::<PhysicalPlanWrapperRef>() {
+ wrapper.wrap(physical_plan, query_ctx)
+ } else {
+ physical_plan
+ };
+
Ok(Output::Stream(self.execute_stream(&ctx, &physical_plan)?))
}
@@ -95,7 +108,7 @@ impl DatafusionQueryEngine {
let table = self.find_table(&table_name).await?;
let output = self
- .exec_query_plan(LogicalPlan::DfPlan((*dml.input).clone()))
+ .exec_query_plan(LogicalPlan::DfPlan((*dml.input).clone()), query_ctx)
.await?;
let mut stream = match output {
Output::RecordBatches(batches) => batches.as_stream(),
@@ -216,7 +229,7 @@ impl QueryEngine for DatafusionQueryEngine {
LogicalPlan::DfPlan(DfLogicalPlan::Dml(dml)) => {
self.exec_dml_statement(dml, query_ctx).await
}
- _ => self.exec_query_plan(plan).await,
+ _ => self.exec_query_plan(plan, query_ctx).await,
}
}
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index 4e18a86c0054..a71d6b88d7f4 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -26,6 +26,7 @@ mod optimizer;
pub mod parser;
pub mod physical_optimizer;
pub mod physical_planner;
+pub mod physical_wrapper;
pub mod plan;
pub mod planner;
pub mod query_engine;
diff --git a/src/query/src/physical_wrapper.rs b/src/query/src/physical_wrapper.rs
new file mode 100644
index 000000000000..8fb3e8c15739
--- /dev/null
+++ b/src/query/src/physical_wrapper.rs
@@ -0,0 +1,26 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use common_query::physical_plan::PhysicalPlan;
+use session::context::QueryContextRef;
+
+/// wrap physical plan with additional layer
+/// e.g: metrics retrieving layer upon physical plan
+pub trait PhysicalPlanWrapper: Send + Sync + 'static {
+ fn wrap(&self, origin: Arc<dyn PhysicalPlan>, ctx: QueryContextRef) -> Arc<dyn PhysicalPlan>;
+}
+
+pub type PhysicalPlanWrapperRef = Arc<dyn PhysicalPlanWrapper>;
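The trait above is intentionally small; a minimal implementation that wraps nothing (a hypothetical `NoopWrapper`, not part of this commit) would look like the sketch below. A real plugin would instead return a metrics-collecting `PhysicalPlan` and register the resulting `PhysicalPlanWrapperRef` into `Plugins`, so that `exec_query_plan` can find it via `plugins.get::<PhysicalPlanWrapperRef>()`.

use std::sync::Arc;

use common_query::physical_plan::PhysicalPlan;
use query::physical_wrapper::PhysicalPlanWrapper;
use session::context::QueryContextRef;

// Hypothetical wrapper: returns the plan unchanged. A metrics wrapper would
// return an Arc to a PhysicalPlan that records execution statistics before
// delegating to `origin`.
struct NoopWrapper;

impl PhysicalPlanWrapper for NoopWrapper {
    fn wrap(&self, origin: Arc<dyn PhysicalPlan>, _ctx: QueryContextRef) -> Arc<dyn PhysicalPlan> {
        origin
    }
}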
diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs
index ed7f75e700a6..3cfbf70ff74c 100644
--- a/src/query/src/query_engine.rs
+++ b/src/query/src/query_engine.rs
@@ -108,9 +108,9 @@ impl QueryEngineFactory {
with_dist_planner,
partition_manager,
clients,
- plugins,
+ plugins.clone(),
));
- let query_engine = Arc::new(DatafusionQueryEngine::new(state));
+ let query_engine = Arc::new(DatafusionQueryEngine::new(state, plugins));
register_functions(&query_engine);
Self { query_engine }
}
diff --git a/tests-integration/src/tests.rs b/tests-integration/src/tests.rs
index 2557dc8201af..ca6bfaf60d31 100644
--- a/tests-integration/src/tests.rs
+++ b/tests-integration/src/tests.rs
@@ -65,7 +65,9 @@ impl MockStandaloneInstance {
pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandaloneInstance {
let (opts, guard) = create_tmp_dir_and_datanode_opts(StorageType::File, test_name);
- let (dn_instance, heartbeat) = DatanodeInstance::with_opts(&opts).await.unwrap();
+ let (dn_instance, heartbeat) = DatanodeInstance::with_opts(&opts, Default::default())
+ .await
+ .unwrap();
let frontend_instance = Instance::try_new_standalone(dn_instance.clone())
.await
| feat | physical plan wrapper (#1837) |
| 488eabce4a306c283bdefa2913cdfbb0c33367b0 | 2022-11-13 09:27:23 | fys | feat: support standalone and distributed insert in frontend (#473) | false |
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index c58c7fdf6832..4a8874428d4b 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -20,6 +20,7 @@ use crate::partitioning::range::RangePartitionRule;
use crate::table::route::TableRoutes;
use crate::table::DistTable;
+#[derive(Clone)]
pub struct FrontendCatalogManager {
backend: KvBackendRef,
table_routes: Arc<TableRoutes>,
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 996e5bb435e8..63b9a2055f31 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -193,6 +193,33 @@ pub enum Error {
table_name: String,
backtrace: Backtrace,
},
+
+ #[snafu(display("Failed to find catalog by name: {}", catalog_name))]
+ CatalogNotFound {
+ catalog_name: String,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to find schema, schema info: {}", schema_info))]
+ SchemaNotFound {
+ schema_info: String,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Table occurs error, source: {}", source))]
+ Table {
+ #[snafu(backtrace)]
+ source: table::error::Error,
+ },
+
+ #[snafu(display("Failed to get catalog manager"))]
+ CatalogManager { backtrace: Backtrace },
+
+ #[snafu(display("Failed to get full table name, source: {}", source))]
+ FullTableName {
+ #[snafu(backtrace)]
+ source: sql::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -208,6 +235,7 @@ impl ErrorExt for Error {
| Error::InvalidInsertRequest { .. }
| Error::FindPartitionColumn { .. }
| Error::ColumnValuesNumberMismatch { .. }
+ | Error::CatalogManager { .. }
| Error::RegionKeysSize { .. } => StatusCode::InvalidArguments,
Error::RuntimeResource { source, .. } => source.status_code(),
@@ -216,6 +244,10 @@ impl ErrorExt for Error {
Error::ParseSql { source } => source.status_code(),
+ Error::FullTableName { source, .. } => source.status_code(),
+
+ Error::Table { source } => source.status_code(),
+
Error::ConvertColumnDefaultConstraint { source, .. }
| Error::ConvertScalarValue { source, .. } => source.status_code(),
@@ -234,7 +266,9 @@ impl ErrorExt for Error {
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
- Error::JoinTask { .. } => StatusCode::Unexpected,
+ Error::JoinTask { .. }
+ | Error::SchemaNotFound { .. }
+ | Error::CatalogNotFound { .. } => StatusCode::Unexpected,
Error::Catalog { source, .. } => source.status_code(),
Error::ParseCatalogEntry { source, .. } => source.status_code(),
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index a4205e89d011..d69b69f2c2c4 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -13,6 +13,7 @@ use api::v1::{
};
use async_trait::async_trait;
use catalog::remote::MetaKvBackend;
+use catalog::{CatalogList, CatalogProviderRef, SchemaProviderRef};
use client::admin::{admin_result_to_output, Admin};
use client::{Client, Database, Select};
use common_error::prelude::BoxedError;
@@ -29,6 +30,7 @@ use servers::query_handler::{
use snafu::prelude::*;
use sql::ast::{ColumnDef, TableConstraint};
use sql::statements::create::{CreateTable, TIME_INDEX};
+use sql::statements::insert::Insert;
use sql::statements::statement::Statement;
use sql::statements::{column_def_to_schema, table_idents_to_full_name};
use sql::{dialect::GenericDialect, parser::ParserContext};
@@ -37,6 +39,7 @@ use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{self, ConvertColumnDefaultConstraintSnafu, Result};
use crate::frontend::{FrontendOptions, Mode};
+use crate::sql::insert_to_request;
use crate::table::route::TableRoutes;
#[async_trait]
@@ -56,13 +59,27 @@ pub trait FrontendInstance:
pub type FrontendInstanceRef = Arc<dyn FrontendInstance>;
-#[derive(Default)]
+#[derive(Clone)]
pub struct Instance {
// TODO(hl): In standalone mode, there is only one client.
// But in distribute mode, frontend should fetch datanodes' addresses from metasrv.
client: Client,
/// catalog manager is None in standalone mode, datanode will keep their own
catalog_manager: Option<FrontendCatalogManager>,
+ // TODO(fys): it should be a trait that corresponds to two implementations:
+ // Standalone and Distributed, then the code behind it doesn't need to use so
+ // many match statements.
+ mode: Mode,
+}
+
+impl Default for Instance {
+ fn default() -> Self {
+ Self {
+ client: Client::default(),
+ catalog_manager: None,
+ mode: Mode::Standalone,
+ }
+ }
}
impl Instance {
@@ -115,6 +132,43 @@ impl Instance {
pub fn admin(&self) -> Admin {
Admin::new("greptime", self.client.clone())
}
+
+ fn get_catalog(&self, catalog_name: &str) -> Result<CatalogProviderRef> {
+ self.catalog_manager
+ .as_ref()
+ .context(error::CatalogManagerSnafu)?
+ .catalog(catalog_name)
+ .context(error::CatalogSnafu)?
+ .context(error::CatalogNotFoundSnafu { catalog_name })
+ }
+
+ fn get_schema(provider: CatalogProviderRef, schema_name: &str) -> Result<SchemaProviderRef> {
+ provider
+ .schema(schema_name)
+ .context(error::CatalogSnafu)?
+ .context(error::SchemaNotFoundSnafu {
+ schema_info: schema_name,
+ })
+ }
+
+ async fn sql_dist_insert(&self, insert: Box<Insert>) -> Result<usize> {
+ let (catalog, schema, table) = insert.full_table_name().context(error::ParseSqlSnafu)?;
+
+ let catalog_provider = self.get_catalog(&catalog)?;
+ let schema_provider = Self::get_schema(catalog_provider, &schema)?;
+
+ let insert_request = insert_to_request(&schema_provider, *insert)?;
+
+ let table = schema_provider
+ .table(&table)
+ .context(error::CatalogSnafu)?
+ .context(error::TableNotFoundSnafu { table_name: &table })?;
+
+ table
+ .insert(insert_request)
+ .await
+ .context(error::TableSnafu)
+ }
}
#[async_trait]
@@ -131,6 +185,7 @@ impl Instance {
Self {
client,
catalog_manager: None,
+ mode: Mode::Standalone,
}
}
}
@@ -156,26 +211,44 @@ impl SqlQueryHandler for Instance {
.database()
.select(Select::Sql(query.to_string()))
.await
- .and_then(|object_result| object_result.try_into()),
+ .and_then(|object_result| object_result.try_into())
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query }),
Statement::Insert(insert) => {
- // TODO(dennis): respect schema_name when inserting data
- let (_catalog_name, _schema_name, table_name) = insert
- .full_table_name()
- .context(error::ParseSqlSnafu)
- .map_err(BoxedError::new)
- .context(server_error::ExecuteInsertSnafu {
- msg: "Failed to get table name",
- })?;
-
- let expr = InsertExpr {
- table_name,
- expr: Some(insert_expr::Expr::Sql(query.to_string())),
- options: HashMap::default(),
- };
- self.database()
- .insert(expr)
- .await
- .and_then(|object_result| object_result.try_into())
+ match self.mode {
+ Mode::Standalone => {
+ // TODO(dennis): respect schema_name when inserting data
+ let (_catalog_name, _schema_name, table_name) = insert
+ .full_table_name()
+ .context(error::ParseSqlSnafu)
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteInsertSnafu {
+ msg: "Failed to get table name",
+ })?;
+
+ let expr = InsertExpr {
+ table_name,
+ expr: Some(insert_expr::Expr::Sql(query.to_string())),
+ options: HashMap::default(),
+ };
+ self.database()
+ .insert(expr)
+ .await
+ .and_then(|object_result| object_result.try_into())
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query })
+ }
+ Mode::Distributed => {
+ let affected = self
+ .sql_dist_insert(insert)
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteInsertSnafu {
+ msg: "execute insert failed",
+ })?;
+ Ok(Output::AffectedRows(affected))
+ }
+ }
}
Statement::CreateTable(create) => {
let expr = create_to_expr(create)
@@ -185,13 +258,17 @@ impl SqlQueryHandler for Instance {
.create(expr)
.await
.and_then(admin_result_to_output)
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query })
}
Statement::ShowDatabases(_) | Statement::ShowTables(_) => self
.database()
.select(Select::Sql(query.to_string()))
.await
- .and_then(|object_result| object_result.try_into()),
+ .and_then(|object_result| object_result.try_into())
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query }),
Statement::CreateDatabase(c) => {
let expr = CreateDatabaseExpr {
@@ -201,6 +278,8 @@ impl SqlQueryHandler for Instance {
.create_database(expr)
.await
.and_then(admin_result_to_output)
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query })
}
Statement::Alter(alter_stmt) => self
.admin()
@@ -210,13 +289,13 @@ impl SqlQueryHandler for Instance {
.context(server_error::ExecuteAlterSnafu { query })?,
)
.await
- .and_then(admin_result_to_output),
+ .and_then(admin_result_to_output)
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query }),
Statement::ShowCreateTable(_) => {
return server_error::NotSupportedSnafu { feat: query }.fail()
}
}
- .map_err(BoxedError::new)
- .context(server_error::ExecuteQuerySnafu { query })
}
async fn insert_script(&self, _name: &str, _script: &str) -> server_error::Result<()> {
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index 09e26bfa2434..da6de4e7159b 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -2,22 +2,72 @@ use api::v1::InsertExpr;
use async_trait::async_trait;
use common_error::prelude::BoxedError;
use servers::influxdb::InfluxdbRequest;
-use servers::{error::ExecuteQuerySnafu, query_handler::InfluxdbLineProtocolHandler};
-use snafu::ResultExt;
+use servers::{error as server_error, query_handler::InfluxdbLineProtocolHandler};
+use snafu::{OptionExt, ResultExt};
+use table::requests::InsertRequest;
+use crate::error;
+use crate::error::Result;
+use crate::frontend::Mode;
use crate::instance::Instance;
#[async_trait]
impl InfluxdbLineProtocolHandler for Instance {
async fn exec(&self, request: &InfluxdbRequest) -> servers::error::Result<()> {
- let exprs: Vec<InsertExpr> = request.try_into()?;
- self.database()
- .batch_insert(exprs)
- .await
- .map_err(BoxedError::new)
- .context(ExecuteQuerySnafu {
- query: &request.lines,
- })?;
+ match self.mode {
+ Mode::Standalone => {
+ let exprs: Vec<InsertExpr> = request.try_into()?;
+ self.database()
+ .batch_insert(exprs)
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu {
+ query: &request.lines,
+ })?;
+ }
+ Mode::Distributed => {
+ self.dist_insert(request.try_into()?)
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteInsertSnafu {
+ msg: "execute insert failed",
+ })?;
+ }
+ }
+
Ok(())
}
}
+
+impl Instance {
+ pub(crate) async fn dist_insert(&self, inserts: Vec<InsertRequest>) -> Result<usize> {
+ let mut joins = Vec::with_capacity(inserts.len());
+
+ for insert in inserts {
+ let self_clone = self.clone();
+
+ // TODO(fys): need a separate runtime here
+ let join = tokio::spawn(async move {
+ let catalog = self_clone.get_catalog(&insert.catalog_name)?;
+ let schema = Self::get_schema(catalog, &insert.schema_name)?;
+ let table = schema
+ .table(&insert.table_name)
+ .context(error::CatalogSnafu)?
+ .context(error::TableNotFoundSnafu {
+ table_name: &insert.table_name,
+ })?;
+
+ table.insert(insert).await.context(error::TableSnafu)
+ });
+ joins.push(join);
+ }
+
+ let mut affected = 0;
+
+ for join in joins {
+ affected += join.await.context(error::JoinTaskSnafu)??;
+ }
+
+ Ok(affected)
+ }
+}
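The `dist_insert` above fans out one task per insert and sums the affected rows. The standalone sketch below shows only that join-and-sum shape with placeholder tasks (no GreptimeDB types involved); it is an illustration of the pattern, not the project's API.

// Placeholder fan-out: each spawned task stands in for one per-table insert
// and returns the number of rows it "wrote"; the caller sums the results.
async fn fan_out_and_sum(row_counts: Vec<usize>) -> usize {
    let joins: Vec<_> = row_counts
        .into_iter()
        .map(|rows| tokio::spawn(async move { rows }))
        .collect();

    let mut affected = 0;
    for join in joins {
        // The real code propagates both the join error and the insert error;
        // here the join result is simply unwrapped for brevity.
        affected += join.await.expect("task panicked");
    }
    affected
}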
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index d2b6b13502ac..1645bc977849 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -7,6 +7,7 @@ use servers::query_handler::OpentsdbProtocolHandler;
use snafu::prelude::*;
use crate::error::{self, Result};
+use crate::frontend::Mode;
use crate::instance::Instance;
#[async_trait]
@@ -14,12 +15,25 @@ impl OpentsdbProtocolHandler for Instance {
async fn exec(&self, data_point: &DataPoint) -> server_error::Result<()> {
// TODO(LFC): Insert metrics in batch, then make OpentsdbLineProtocolHandler::exec received multiple data points, when
// metric table and tags can be created upon insertion.
- self.insert_opentsdb_metric(data_point)
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::PutOpentsdbDataPointSnafu {
- data_point: format!("{:?}", data_point),
- })?;
+ match self.mode {
+ Mode::Standalone => {
+ self.insert_opentsdb_metric(data_point)
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::PutOpentsdbDataPointSnafu {
+ data_point: format!("{:?}", data_point),
+ })?;
+ }
+ Mode::Distributed => {
+ self.dist_insert(vec![data_point.as_insert_request()])
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteInsertSnafu {
+ msg: "execute insert failed",
+ })?;
+ }
+ }
+
Ok(())
}
}
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index dc466aaa1c5c..f48c9e463f2b 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -12,6 +12,7 @@ use servers::prometheus::{self, Metrics};
use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse};
use snafu::{OptionExt, ResultExt};
+use crate::frontend::Mode;
use crate::instance::Instance;
const SAMPLES_RESPONSE_TYPE: i32 = ResponseType::Samples as i32;
@@ -90,15 +91,29 @@ async fn handle_remote_queries(
#[async_trait]
impl PrometheusProtocolHandler for Instance {
async fn write(&self, request: WriteRequest) -> ServerResult<()> {
- let exprs = prometheus::write_request_to_insert_exprs(request)?;
-
- self.database()
- .batch_insert(exprs)
- .await
- .map_err(BoxedError::new)
- .context(error::ExecuteInsertSnafu {
- msg: "failed to write prometheus remote request",
- })?;
+ match self.mode {
+ Mode::Standalone => {
+ let exprs = prometheus::write_request_to_insert_exprs(request)?;
+
+ self.database()
+ .batch_insert(exprs)
+ .await
+ .map_err(BoxedError::new)
+ .context(error::ExecuteInsertSnafu {
+ msg: "failed to write prometheus remote request",
+ })?;
+ }
+ Mode::Distributed => {
+ let inserts = prometheus::write_request_to_insert_reqs(request)?;
+
+ self.dist_insert(inserts)
+ .await
+ .map_err(BoxedError::new)
+ .context(error::ExecuteInsertSnafu {
+ msg: "execute insert failed",
+ })?;
+ }
+ }
Ok(())
}
| feat | support standalone and distributed insert in frontend (#473) |
| 4e04a4e48f2e87a849ebd52ad3b26b1ba1cb9b4e | 2024-02-18 16:00:01 | tison | build: support build without git (#3309) | false |
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index e8dfe23f7c1b..4a56e44fb465 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -81,7 +81,7 @@ jobs:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
- run: cargo +stable install taplo-cli --version ^0.8 --locked
+ run: cargo +stable install taplo-cli --version ^0.9 --locked
- name: Run taplo
run: taplo format --check
diff --git a/Cargo.lock b/Cargo.lock
index 2931e3a8ea3c..4113c6f8c7d4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1243,7 +1243,6 @@ dependencies = [
"arrow-schema",
"async-stream",
"async-trait",
- "build-data",
"catalog",
"chrono",
"common-catalog",
@@ -1257,6 +1256,7 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
+ "common-version",
"dashmap",
"datafusion",
"datatypes",
@@ -1782,7 +1782,6 @@ dependencies = [
"api",
"arc-swap",
"async-trait",
- "build-data",
"chrono-tz 0.6.3",
"common-error",
"common-macro",
@@ -1790,6 +1789,7 @@ dependencies = [
"common-runtime",
"common-telemetry",
"common-time",
+ "common-version",
"datafusion",
"datatypes",
"libc",
@@ -8882,7 +8882,6 @@ dependencies = [
"axum-macros",
"axum-test-helper 0.3.0",
"base64 0.21.5",
- "build-data",
"bytes",
"catalog",
"chrono",
@@ -8901,6 +8900,7 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
+ "common-version",
"datafusion",
"datafusion-common",
"datafusion-expr",
diff --git a/Cargo.toml b/Cargo.toml
index 45b47d6f515e..7dfd452e3204 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -164,7 +164,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
-common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" }
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index 16e407a6995c..b3355dd2d9c6 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -10,11 +10,10 @@ testing = []
[dependencies]
api.workspace = true
arc-swap = "1.0"
-arrow-schema.workspace = true
arrow.workspace = true
+arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
-build-data = "0.1"
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
@@ -25,6 +24,7 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+common-version.workspace = true
dashmap.workspace = true
datafusion.workspace = true
datatypes.workspace = true
diff --git a/src/catalog/src/information_schema/memory_table/tables.rs b/src/catalog/src/information_schema/memory_table/tables.rs
index 9922edd7c49f..9655725aece2 100644
--- a/src/catalog/src/information_schema/memory_table/tables.rs
+++ b/src/catalog/src/information_schema/memory_table/tables.rs
@@ -21,8 +21,6 @@ use datatypes::vectors::{Int64Vector, StringVector};
use crate::information_schema::table_names::*;
-const UNKNOWN: &str = "unknown";
-
/// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
@@ -72,30 +70,27 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
],
),
- BUILD_INFO => (
- string_columns(&[
- "GIT_BRANCH",
- "GIT_COMMIT",
- "GIT_COMMIT_SHORT",
- "GIT_DIRTY",
- "PKG_VERSION",
- ]),
- vec![
- Arc::new(StringVector::from(vec![
- build_data::get_git_branch().unwrap_or_else(|_| UNKNOWN.to_string())
- ])),
- Arc::new(StringVector::from(vec![
- build_data::get_git_commit().unwrap_or_else(|_| UNKNOWN.to_string())
- ])),
- Arc::new(StringVector::from(vec![
- build_data::get_git_commit_short().unwrap_or_else(|_| UNKNOWN.to_string())
- ])),
- Arc::new(StringVector::from(vec![
- build_data::get_git_dirty().map_or(UNKNOWN.to_string(), |v| v.to_string())
- ])),
- Arc::new(StringVector::from(vec![option_env!("CARGO_PKG_VERSION")])),
- ],
- ),
+ BUILD_INFO => {
+ let build_info = common_version::build_info();
+ (
+ string_columns(&[
+ "GIT_BRANCH",
+ "GIT_COMMIT",
+ "GIT_COMMIT_SHORT",
+ "GIT_DIRTY",
+ "PKG_VERSION",
+ ]),
+ vec![
+ Arc::new(StringVector::from(vec![build_info.branch.to_string()])),
+ Arc::new(StringVector::from(vec![build_info.commit.to_string()])),
+ Arc::new(StringVector::from(vec![build_info
+ .commit_short
+ .to_string()])),
+ Arc::new(StringVector::from(vec![build_info.dirty.to_string()])),
+ Arc::new(StringVector::from(vec![build_info.version.to_string()])),
+ ],
+ )
+ }
CHARACTER_SETS => (
vec![
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index d9f7feb945fa..c8c827661fab 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -36,8 +36,8 @@ prost.workspace = true
rand.workspace = true
session.workspace = true
snafu.workspace = true
-tokio-stream = { workspace = true, features = ["net"] }
tokio.workspace = true
+tokio-stream = { workspace = true, features = ["net"] }
tonic.workspace = true
[dev-dependencies]
diff --git a/src/cmd/build.rs b/src/cmd/build.rs
index 85d72f6df218..5b7f1458843c 100644
--- a/src/cmd/build.rs
+++ b/src/cmd/build.rs
@@ -13,5 +13,5 @@
// limitations under the License.
fn main() {
- common_version::setup_git_versions();
+ common_version::setup_build_info();
}
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index 193fc108da74..1e456b40c860 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -5,8 +5,8 @@ edition.workspace = true
license.workspace = true
[dependencies]
-arrow-schema.workspace = true
arrow.workspace = true
+arrow-schema.workspace = true
async-compression = { version = "0.3", features = [
"bzip2",
"gzip",
@@ -34,8 +34,8 @@ regex = "1.7"
serde.workspace = true
snafu.workspace = true
strum.workspace = true
-tokio-util.workspace = true
tokio.workspace = true
+tokio-util.workspace = true
url = "2.3"
[dev-dependencies]
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index f3161a5a55fe..7053d10771ad 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -8,7 +8,6 @@ license.workspace = true
api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
-build-data = "0.1"
chrono-tz = "0.6"
common-error.workspace = true
common-macro.workspace = true
@@ -16,6 +15,7 @@ common-query.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
libc = "0.2"
diff --git a/src/common/function/src/system/build.rs b/src/common/function/src/system/build.rs
index 43433ce425ce..ce9e77fdfb7c 100644
--- a/src/common/function/src/system/build.rs
+++ b/src/common/function/src/system/build.rs
@@ -22,8 +22,6 @@ use datatypes::vectors::{StringVector, VectorRef};
use crate::function::{Function, FunctionContext};
-const DEFAULT_VALUE: &str = "unknown";
-
/// Generates build information
#[derive(Clone, Debug, Default)]
pub struct BuildFunction;
@@ -52,15 +50,7 @@ impl Function for BuildFunction {
}
fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
- let build_info = format!(
- "branch: {}\ncommit: {}\ncommit short: {}\ndirty: {}\nversion: {}",
- build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
- build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
- build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
- build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string()),
- env!("CARGO_PKG_VERSION")
- );
-
+ let build_info = common_version::build_info().to_string();
let v = Arc::new(StringVector::from(vec![build_info]));
Ok(v)
}
@@ -87,14 +77,7 @@ mod tests {
volatility: Volatility::Immutable
} if valid_types == vec![ConcreteDataType::string_datatype()]
));
- let build_info = format!(
- "branch: {}\ncommit: {}\ncommit short: {}\ndirty: {}\nversion: {}",
- build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
- build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
- build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
- build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string()),
- env!("CARGO_PKG_VERSION")
- );
+ let build_info = common_version::build_info().to_string();
let vector = build.eval(FunctionContext::default(), &[]).unwrap();
let expect: VectorRef = Arc::new(StringVector::from(vec![build_info]));
assert_eq!(expect, vector);
diff --git a/src/common/greptimedb-telemetry/build.rs b/src/common/greptimedb-telemetry/build.rs
index 85d72f6df218..5b7f1458843c 100644
--- a/src/common/greptimedb-telemetry/build.rs
+++ b/src/common/greptimedb-telemetry/build.rs
@@ -13,5 +13,5 @@
// limitations under the License.
fn main() {
- common_version::setup_git_versions();
+ common_version::setup_build_info();
}
diff --git a/src/common/meta/Cargo.toml b/src/common/meta/Cargo.toml
index 18c8a6e5d555..68983d784336 100644
--- a/src/common/meta/Cargo.toml
+++ b/src/common/meta/Cargo.toml
@@ -28,8 +28,8 @@ common-wal.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true
-futures-util.workspace = true
futures.workspace = true
+futures-util.workspace = true
hex = { version = "0.4" }
humantime-serde.workspace = true
lazy_static.workspace = true
diff --git a/src/common/query/Cargo.toml b/src/common/query/Cargo.toml
index 3dabbb84f1f3..0232722499ac 100644
--- a/src/common/query/Cargo.toml
+++ b/src/common/query/Cargo.toml
@@ -11,9 +11,9 @@ common-error.workspace = true
common-macro.workspace = true
common-recordbatch.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
serde.workspace = true
snafu.workspace = true
diff --git a/src/common/recordbatch/Cargo.toml b/src/common/recordbatch/Cargo.toml
index c90153b65ce5..ac91e934cc7d 100644
--- a/src/common/recordbatch/Cargo.toml
+++ b/src/common/recordbatch/Cargo.toml
@@ -9,8 +9,8 @@ arc-swap = "1.6"
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
-datafusion-common.workspace = true
datafusion.workspace = true
+datafusion-common.workspace = true
datatypes.workspace = true
futures.workspace = true
paste = "1.0"
diff --git a/src/common/runtime/Cargo.toml b/src/common/runtime/Cargo.toml
index 629da4f4e0d4..76dc0aa9ffa0 100644
--- a/src/common/runtime/Cargo.toml
+++ b/src/common/runtime/Cargo.toml
@@ -14,10 +14,10 @@ once_cell.workspace = true
paste.workspace = true
prometheus.workspace = true
snafu.workspace = true
+tokio.workspace = true
tokio-metrics = "0.3"
tokio-metrics-collector = "0.2"
tokio-util.workspace = true
-tokio.workspace = true
[dev-dependencies]
tokio-test = "0.4"
diff --git a/src/common/substrait/Cargo.toml b/src/common/substrait/Cargo.toml
index 9ae36600677c..891cba1d833f 100644
--- a/src/common/substrait/Cargo.toml
+++ b/src/common/substrait/Cargo.toml
@@ -13,10 +13,10 @@ common-catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-telemetry.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-substrait.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
futures = "0.3"
promql.workspace = true
diff --git a/src/common/time/Cargo.toml b/src/common/time/Cargo.toml
index 04976ebadd45..032520ffee18 100644
--- a/src/common/time/Cargo.toml
+++ b/src/common/time/Cargo.toml
@@ -6,8 +6,8 @@ license.workspace = true
[dependencies]
arrow.workspace = true
-chrono-tz = "0.8"
chrono.workspace = true
+chrono-tz = "0.8"
common-error.workspace = true
common-macro.workspace = true
once_cell.workspace = true
diff --git a/src/common/version/src/lib.rs b/src/common/version/src/lib.rs
index 28ceaf8129de..b3cb124ffcb5 100644
--- a/src/common/version/src/lib.rs
+++ b/src/common/version/src/lib.rs
@@ -12,24 +12,89 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-const DEFAULT_VALUE: &str = "unknown";
+use std::borrow::Cow;
+use std::fmt::Display;
+use std::sync::OnceLock;
+
+const UNKNOWN: &str = "unknown";
+
+pub struct BuildInfo {
+ pub branch: Cow<'static, str>,
+ pub commit: Cow<'static, str>,
+ pub commit_short: Cow<'static, str>,
+ pub dirty: Cow<'static, str>,
+ pub timestamp: Cow<'static, str>,
+
+ /// Rustc Version
+ pub rustc: Cow<'static, str>,
+ /// GreptimeDB Version
+ pub version: Cow<'static, str>,
+}
+
+impl Display for BuildInfo {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "{}",
+ [
+ format!("branch: {}", self.branch),
+ format!("commit: {}", self.commit),
+ format!("commit_short: {}", self.commit_short),
+ format!("dirty: {}", self.dirty),
+ format!("version: {}", self.version),
+ ]
+ .join("\n")
+ )
+ }
+}
+
+static BUILD: OnceLock<BuildInfo> = OnceLock::new();
+
+pub fn build_info() -> &'static BuildInfo {
+ BUILD.get_or_init(|| {
+ let branch = build_data::get_git_branch()
+ .map(Cow::Owned)
+ .unwrap_or(Cow::Borrowed(UNKNOWN));
+ let commit = build_data::get_git_commit()
+ .map(Cow::Owned)
+ .unwrap_or(Cow::Borrowed(UNKNOWN));
+ let commit_short = build_data::get_git_commit_short()
+ .map(Cow::Owned)
+ .unwrap_or(Cow::Borrowed(UNKNOWN));
+ let dirty = build_data::get_git_dirty()
+ .map(|b| Cow::Owned(b.to_string()))
+ .unwrap_or(Cow::Borrowed(UNKNOWN));
+ let timestamp = build_data::get_source_time()
+ .map(|ts| Cow::Owned(build_data::format_timestamp(ts)))
+ .unwrap_or(Cow::Borrowed(UNKNOWN));
+ let rustc = build_data::get_rustc_version()
+ .map(Cow::Owned)
+ .unwrap_or(Cow::Borrowed(UNKNOWN));
+ let version = Cow::Borrowed(env!("CARGO_PKG_VERSION"));
+
+ BuildInfo {
+ branch,
+ commit,
+ commit_short,
+ dirty,
+ timestamp,
+ rustc,
+ version,
+ }
+ })
+}
#[allow(clippy::print_stdout)]
-pub fn setup_git_versions() {
- println!(
- "cargo:rustc-env=GIT_COMMIT={}",
- build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
- );
+pub fn setup_build_info() {
+ let build_info = build_info();
+ println!("cargo:rustc-env=GIT_COMMIT={}", build_info.commit);
println!(
"cargo:rustc-env=GIT_COMMIT_SHORT={}",
- build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
- );
- println!(
- "cargo:rustc-env=GIT_BRANCH={}",
- build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string())
- );
- println!(
- "cargo:rustc-env=GIT_DIRTY={}",
- build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string())
+ build_info.commit_short
);
+ println!("cargo:rustc-env=GIT_BRANCH={}", build_info.branch);
+ println!("cargo:rustc-env=GIT_DIRTY={}", build_info.dirty);
+ println!("cargo:rustc-env=GIT_DIRTY={}", build_info.dirty);
+ println!("cargo:rustc-env=RUSTC_VERSION={}", build_info.rustc);
+ println!("cargo:rustc-env=SOURCE_TIMESTAMP={}", build_info.timestamp);
}
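
The refactor above caches build metadata in a `OnceLock` so it is computed once per process and handed out as a `&'static` reference. A minimal, self-contained sketch of that pattern (the struct and field below are illustrative, not the crate's actual `BuildInfo`):

```rust
use std::sync::OnceLock;

/// Build metadata computed once and then shared for the process lifetime.
struct Info {
    version: &'static str,
}

static INFO: OnceLock<Info> = OnceLock::new();

/// The first caller pays the initialization cost; later callers get the cached value.
fn info() -> &'static Info {
    INFO.get_or_init(|| Info {
        // CARGO_PKG_VERSION is always provided by Cargo at compile time.
        version: env!("CARGO_PKG_VERSION"),
    })
}

fn main() {
    println!("version: {}", info().version);
}
```
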
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 64c8d030640b..dc50c905f40a 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -23,8 +23,8 @@ common-datasource.workspace = true
common-error.workspace = true
common-function.workspace = true
common-greptimedb-telemetry.workspace = true
-common-grpc-expr.workspace = true
common-grpc.workspace = true
+common-grpc-expr.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-procedure.workspace = true
@@ -35,9 +35,9 @@ common-telemetry.workspace = true
common-time.workspace = true
common-wal.workspace = true
dashmap.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
file-engine.workspace = true
futures = "0.3"
@@ -65,8 +65,8 @@ sql.workspace = true
store-api.workspace = true
substrait.workspace = true
table.workspace = true
-tokio-stream = { workspace = true, features = ["net"] }
tokio.workspace = true
+tokio-stream = { workspace = true, features = ["net"] }
toml.workspace = true
tonic.workspace = true
tower = { version = "0.4", features = ["full"] }
diff --git a/src/datatypes/Cargo.toml b/src/datatypes/Cargo.toml
index c7674d9973ae..ce40c640d492 100644
--- a/src/datatypes/Cargo.toml
+++ b/src/datatypes/Cargo.toml
@@ -9,9 +9,9 @@ default = []
test = []
[dependencies]
+arrow.workspace = true
arrow-array.workspace = true
arrow-schema.workspace = true
-arrow.workspace = true
common-base.workspace = true
common-decimal.workspace = true
common-error.workspace = true
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 6df16182511c..81990c37f192 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -26,8 +26,8 @@ common-config.workspace = true
common-datasource.workspace = true
common-error.workspace = true
common-function.workspace = true
-common-grpc-expr.workspace = true
common-grpc.workspace = true
+common-grpc-expr.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-procedure.workspace = true
@@ -36,9 +36,9 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datanode.workspace = true
datatypes.workspace = true
file-engine.workspace = true
diff --git a/src/index/Cargo.toml b/src/index/Cargo.toml
index 4c0dc82b0296..082831b7315c 100644
--- a/src/index/Cargo.toml
+++ b/src/index/Cargo.toml
@@ -19,12 +19,12 @@ greptime-proto.workspace = true
mockall.workspace = true
pin-project.workspace = true
prost.workspace = true
-regex-automata.workspace = true
regex.workspace = true
+regex-automata.workspace = true
snafu.workspace = true
[dev-dependencies]
rand.workspace = true
tempfile.workspace = true
-tokio-util.workspace = true
tokio.workspace = true
+tokio-util.workspace = true
diff --git a/src/log-store/Cargo.toml b/src/log-store/Cargo.toml
index 4f35fa19efb4..82d8c2a6ba01 100644
--- a/src/log-store/Cargo.toml
+++ b/src/log-store/Cargo.toml
@@ -25,8 +25,8 @@ common-telemetry.workspace = true
common-time.workspace = true
common-wal.workspace = true
dashmap.workspace = true
-futures-util.workspace = true
futures.workspace = true
+futures-util.workspace = true
protobuf = { version = "2", features = ["bytes"] }
raft-engine.workspace = true
rskafka.workspace = true
@@ -34,8 +34,8 @@ serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
store-api.workspace = true
-tokio-util.workspace = true
tokio.workspace = true
+tokio-util.workspace = true
[dev-dependencies]
common-meta = { workspace = true, features = ["testing"] }
diff --git a/src/meta-client/Cargo.toml b/src/meta-client/Cargo.toml
index 56765db3337f..921fe610548f 100644
--- a/src/meta-client/Cargo.toml
+++ b/src/meta-client/Cargo.toml
@@ -20,8 +20,8 @@ serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
table.workspace = true
-tokio-stream = { workspace = true, features = ["net"] }
tokio.workspace = true
+tokio-stream = { workspace = true, features = ["net"] }
tonic.workspace = true
[dev-dependencies]
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index dd1db0bf8b6f..1202514ac686 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -18,8 +18,8 @@ common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
common-greptimedb-telemetry.workspace = true
-common-grpc-expr.workspace = true
common-grpc.workspace = true
+common-grpc-expr.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-procedure.workspace = true
@@ -51,8 +51,8 @@ snafu.workspace = true
store-api.workspace = true
strum.workspace = true
table.workspace = true
-tokio-stream = { workspace = true, features = ["net"] }
tokio.workspace = true
+tokio-stream = { workspace = true, features = ["net"] }
toml.workspace = true
tonic.workspace = true
tower = "0.4"
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 92f1c63525a0..947208c500b0 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -34,9 +34,9 @@ common-test-util = { workspace = true, optional = true }
common-time.workspace = true
common-wal.workspace = true
dashmap.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
futures.workspace = true
humantime-serde.workspace = true
@@ -62,9 +62,9 @@ snafu.workspace = true
store-api.workspace = true
strum.workspace = true
table.workspace = true
+tokio.workspace = true
tokio-stream.workspace = true
tokio-util.workspace = true
-tokio.workspace = true
uuid.workspace = true
[dev-dependencies]
diff --git a/src/operator/Cargo.toml b/src/operator/Cargo.toml
index 91a410997db6..0543604528f9 100644
--- a/src/operator/Cargo.toml
+++ b/src/operator/Cargo.toml
@@ -27,9 +27,9 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
file-engine.workspace = true
futures = "0.3"
diff --git a/src/partition/Cargo.toml b/src/partition/Cargo.toml
index cf9ba0a16f2f..d6a747db66ba 100644
--- a/src/partition/Cargo.toml
+++ b/src/partition/Cargo.toml
@@ -13,9 +13,9 @@ common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
common-telemetry.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
diff --git a/src/puffin/Cargo.toml b/src/puffin/Cargo.toml
index 55001b6b07de..0a42084e390a 100644
--- a/src/puffin/Cargo.toml
+++ b/src/puffin/Cargo.toml
@@ -17,5 +17,5 @@ serde_json.workspace = true
snafu.workspace = true
[dev-dependencies]
-tokio-util.workspace = true
tokio.workspace = true
+tokio-util.workspace = true
diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml
index 866eeaa8a905..a09eb7a9d2ac 100644
--- a/src/query/Cargo.toml
+++ b/src/query/Cargo.toml
@@ -8,8 +8,8 @@ license.workspace = true
ahash.workspace = true
api.workspace = true
arc-swap = "1.0"
-arrow-schema.workspace = true
arrow.workspace = true
+arrow-schema.workspace = true
async-recursion = "1.0"
async-stream.workspace = true
async-trait = "0.1"
@@ -27,12 +27,12 @@ common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-optimizer.workspace = true
datafusion-physical-expr.workspace = true
datafusion-sql.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
futures = "0.3"
futures-util.workspace = true
@@ -45,8 +45,8 @@ object-store.workspace = true
once_cell.workspace = true
partition.workspace = true
prometheus.workspace = true
-promql-parser = "0.1.1"
promql.workspace = true
+promql-parser = "0.1.1"
regex.workspace = true
serde.workspace = true
serde_json.workspace = true
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index a443faf219ba..825e36df99fa 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -45,8 +45,8 @@ datafusion-common = { workspace = true, optional = true }
datafusion-expr = { workspace = true, optional = true }
datafusion-physical-expr = { workspace = true, optional = true }
datatypes.workspace = true
-futures-util.workspace = true
futures.workspace = true
+futures-util.workspace = true
lazy_static.workspace = true
once_cell.workspace = true
paste = { workspace = true, optional = true }
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 1b4fb8db50f5..9a5ded127453 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -13,14 +13,14 @@ testing = []
[dependencies]
aide = { version = "0.9", features = ["axum"] }
api.workspace = true
+arrow.workspace = true
arrow-flight.workspace = true
arrow-ipc.workspace = true
arrow-schema.workspace = true
-arrow.workspace = true
async-trait = "0.1"
auth.workspace = true
-axum-macros = "0.3.8"
axum.workspace = true
+axum-macros = "0.3.8"
base64.workspace = true
bytes.workspace = true
catalog.workspace = true
@@ -28,8 +28,8 @@ chrono.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true
-common-grpc-expr.workspace = true
common-grpc.workspace = true
+common-grpc-expr.workspace = true
common-macro.workspace = true
common-mem-prof = { workspace = true, optional = true }
common-meta.workspace = true
@@ -38,9 +38,9 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
digest = "0.10"
@@ -91,11 +91,11 @@ snap = "1"
sql.workspace = true
strum.workspace = true
table.workspace = true
+tokio.workspace = true
tokio-rustls = "0.25"
tokio-stream = { workspace = true, features = ["net"] }
-tokio.workspace = true
-tonic-reflection = "0.10"
tonic.workspace = true
+tonic-reflection = "0.10"
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
urlencoding = "2.1"
@@ -123,4 +123,4 @@ tokio-postgres-rustls = "0.11"
tokio-test = "0.4"
[build-dependencies]
-build-data = "0.1.4"
+common-version.workspace = true
diff --git a/src/servers/build.rs b/src/servers/build.rs
index d6db46b5ae52..0b735a6800fb 100644
--- a/src/servers/build.rs
+++ b/src/servers/build.rs
@@ -13,11 +13,7 @@
// limitations under the License.
fn main() {
- build_data::set_RUSTC_VERSION();
- build_data::set_GIT_BRANCH();
- build_data::set_GIT_COMMIT();
- build_data::set_SOURCE_TIMESTAMP();
-
+ common_version::setup_build_info();
#[cfg(feature = "dashboard")]
fetch_dashboard_assets();
}
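
Both build scripts above now delegate to a single helper that prints `cargo:rustc-env=...` directives. As a general illustration of that mechanism (the `MY_BUILD_NOTE` variable is hypothetical, not one GreptimeDB emits), a build script exports a value and the crate reads it at compile time:

```rust
// build.rs -- a minimal sketch of the cargo:rustc-env mechanism.
fn main() {
    // Every value printed like this becomes a compile-time env var for the crate being built.
    println!("cargo:rustc-env=MY_BUILD_NOTE=built-from-source");
}
```

```rust
// src/main.rs -- consuming the value set by the build script above.
fn main() {
    // env!() fails the build if the variable is missing; option_env!() yields an Option instead.
    println!("{}", env!("MY_BUILD_NOTE"));
    println!("{:?}", option_env!("NOT_SET_ANYWHERE"));
}
```
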
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 467bad1b1354..2fe1fb85bd2e 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -21,10 +21,10 @@ common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-physical-expr.workspace = true
-datafusion.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
futures.workspace = true
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index e94bf487f4ed..2ac802909fe6 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -12,8 +12,8 @@ api.workspace = true
arrow-flight.workspace = true
async-trait = "0.1"
auth.workspace = true
-axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
axum.workspace = true
+axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
catalog.workspace = true
chrono.workspace = true
client = { workspace = true, features = ["testing"] }
@@ -71,8 +71,8 @@ tower = "0.4"
uuid.workspace = true
[dev-dependencies]
-datafusion-expr.workspace = true
datafusion.workspace = true
+datafusion-expr.workspace = true
itertools.workspace = true
opentelemetry-proto.workspace = true
partition.workspace = true
|
build
|
support build without git (#3309)
|
c93b5743e837119513719786f4230125d513d799
|
2023-05-09 18:26:20
|
ZonaHe
|
feat: update dashboard to v0.2.4 (#1553)
| false
|
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION
index 576b77719ade..f82e0685d918 100644
--- a/src/servers/dashboard/VERSION
+++ b/src/servers/dashboard/VERSION
@@ -1 +1 @@
-v0.2.3
+v0.2.4
|
feat
|
update dashboard to v0.2.4 (#1553)
|
3e8ec8b73a96e3cb91ec5293c6465e2441dd973e
|
2023-04-11 14:28:18
|
Lei, HUANG
|
fix: avoid panic when no region found in table (#1359)
| false
|
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index 5b2e80fbd14d..8ba3963ea0ac 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -39,7 +39,9 @@ use store_api::storage::{
RegionMeta, RegionNumber, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
};
use table::error as table_error;
-use table::error::{RegionSchemaMismatchSnafu, Result as TableResult, TableOperationSnafu};
+use table::error::{
+ InvalidTableSnafu, RegionSchemaMismatchSnafu, Result as TableResult, TableOperationSnafu,
+};
use table::metadata::{
FilterPushDownType, RawTableInfo, TableInfo, TableInfoRef, TableMeta, TableType,
};
@@ -193,7 +195,10 @@ impl<R: Region> Table for MitoTable<R> {
// TODO(hl): we assume table contains at least one region, but with region migration this
// assumption may become invalid.
- let stream_schema = first_schema.unwrap();
+ let stream_schema = first_schema.context(InvalidTableSnafu {
+ table_id: table_info.ident.table_id,
+ })?;
+
let schema = stream_schema.clone();
let stream = Box::pin(async_stream::try_stream! {
for mut reader in readers {
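
The fix above replaces `first_schema.unwrap()` with snafu's `OptionExt::context`, so an empty region list becomes a typed error instead of a panic. A self-contained sketch of that conversion, using a hypothetical error enum rather than the table crate's own:

```rust
use snafu::{OptionExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Invalid table state: {}", table_id))]
    InvalidTable { table_id: u32 },
}

/// `context` turns Option::None into Err(Error::InvalidTable { .. })
/// instead of panicking the way unwrap() would.
fn first_region_schema(schemas: &[String], table_id: u32) -> Result<&String, Error> {
    schemas.first().context(InvalidTableSnafu { table_id })
}

fn main() {
    assert!(first_region_schema(&[], 42).is_err());
}
```
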
diff --git a/src/table/src/error.rs b/src/table/src/error.rs
index d9ac4d13d1ac..916f77b5bf68 100644
--- a/src/table/src/error.rs
+++ b/src/table/src/error.rs
@@ -20,6 +20,8 @@ use datafusion::error::DataFusionError;
use datatypes::arrow::error::ArrowError;
use snafu::Location;
+use crate::metadata::TableId;
+
pub type Result<T> = std::result::Result<T, Error>;
/// Default error implementation of table.
@@ -106,6 +108,7 @@ pub enum Error {
column_name: String,
location: Location,
},
+
#[snafu(display("Regions schemas mismatch in table: {}", table))]
RegionSchemaMismatch { table: String, location: Location },
@@ -121,6 +124,12 @@ pub enum Error {
value: String,
location: Location,
},
+
+ #[snafu(display("Invalid table state: {}", table_id))]
+ InvalidTable {
+ table_id: TableId,
+ location: Location,
+ },
}
impl ErrorExt for Error {
@@ -143,6 +152,7 @@ impl ErrorExt for Error {
Error::ParseTableOption { .. }
| Error::EngineNotFound { .. }
| Error::EngineExist { .. } => StatusCode::InvalidArguments,
+ Error::InvalidTable { .. } => StatusCode::Internal,
}
}
|
fix
|
avoid panic when no region found in table (#1359)
|
b1f7ad097a91b1080220368f0b26ce7d4029002e
|
2023-04-27 17:55:20
|
Yingwen
|
test: Fix s3 region in test (#1493)
| false
|
diff --git a/.env.example b/.env.example
index 2f842b2f7659..4abec140f648 100644
--- a/.env.example
+++ b/.env.example
@@ -3,6 +3,7 @@ GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
+GT_S3_REGION=S3 region
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
diff --git a/src/storage/src/region/tests/compact.rs b/src/storage/src/region/tests/compact.rs
index 22ab357e38d8..b6e5c7c7d89f 100644
--- a/src/storage/src/region/tests/compact.rs
+++ b/src/storage/src/region/tests/compact.rs
@@ -51,6 +51,7 @@ fn new_object_store(store_dir: &str, s3_bucket: Option<String>) -> ObjectStore {
.root(&root)
.access_key_id(&env::var("GT_S3_ACCESS_KEY_ID").unwrap())
.secret_access_key(&env::var("GT_S3_ACCESS_KEY").unwrap())
+ .region(&env::var("GT_S3_REGION").unwrap())
.bucket(&bucket);
return ObjectStore::new(builder).unwrap().finish();
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 78babbf34daf..971480a8ed27 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -151,6 +151,13 @@ fn get_test_store_config(
.secret_access_key(&s3_config.secret_access_key)
.bucket(&s3_config.bucket);
+ if s3_config.endpoint.is_some() {
+ builder.endpoint(s3_config.endpoint.as_ref().unwrap());
+ }
+ if s3_config.region.is_some() {
+ builder.region(s3_config.region.as_ref().unwrap());
+ }
+
let config = ObjectStoreConfig::S3(s3_config);
let store = ObjectStore::new(builder).unwrap().finish();
|
test
|
Fix s3 region in test (#1493)
|
3217b56cc10eeb7f334ee08ecb72b0a0557388f5
|
2023-10-17 13:03:41
|
zyy17
|
ci: release new version '0.4.0' -> '0.4.1' (#2611)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 95a0d66cc619..e9b0ffd779b7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -204,7 +204,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]]
name = "api"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"common-base",
"common-error",
@@ -666,7 +666,7 @@ dependencies = [
[[package]]
name = "auth"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -839,7 +839,7 @@ dependencies = [
[[package]]
name = "benchmarks"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"arrow",
"chrono",
@@ -1222,7 +1222,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arc-swap",
@@ -1506,7 +1506,7 @@ checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961"
[[package]]
name = "client"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arrow-flight",
@@ -1536,7 +1536,7 @@ dependencies = [
"rand",
"session",
"snafu",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"substrait 0.7.5",
"tokio",
"tokio-stream",
@@ -1573,7 +1573,7 @@ dependencies = [
[[package]]
name = "cmd"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"anymap",
"async-trait",
@@ -1621,7 +1621,7 @@ dependencies = [
"servers",
"session",
"snafu",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"temp-env",
"tikv-jemallocator",
@@ -1654,7 +1654,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"anymap",
"bitvec",
@@ -1669,7 +1669,7 @@ dependencies = [
[[package]]
name = "common-catalog"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"chrono",
"common-error",
@@ -1682,7 +1682,7 @@ dependencies = [
[[package]]
name = "common-config"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"common-base",
"humantime-serde",
@@ -1691,7 +1691,7 @@ dependencies = [
[[package]]
name = "common-datasource"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"arrow",
"arrow-schema",
@@ -1720,7 +1720,7 @@ dependencies = [
[[package]]
name = "common-error"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"snafu",
"strum 0.25.0",
@@ -1728,7 +1728,7 @@ dependencies = [
[[package]]
name = "common-function"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"arc-swap",
"chrono-tz 0.6.3",
@@ -1751,7 +1751,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-trait",
"common-error",
@@ -1770,7 +1770,7 @@ dependencies = [
[[package]]
name = "common-grpc"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arrow-flight",
@@ -1800,7 +1800,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -1819,7 +1819,7 @@ dependencies = [
[[package]]
name = "common-macro"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"arc-swap",
"backtrace",
@@ -1836,7 +1836,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"common-error",
"common-macro",
@@ -1849,7 +1849,7 @@ dependencies = [
[[package]]
name = "common-meta"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arrow-flight",
@@ -1887,7 +1887,7 @@ dependencies = [
[[package]]
name = "common-procedure"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-stream",
"async-trait",
@@ -1911,7 +1911,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-trait",
"common-procedure",
@@ -1919,7 +1919,7 @@ dependencies = [
[[package]]
name = "common-query"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -1942,7 +1942,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"common-error",
"common-macro",
@@ -1959,7 +1959,7 @@ dependencies = [
[[package]]
name = "common-runtime"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-trait",
"common-error",
@@ -1976,7 +1976,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"backtrace",
"common-error",
@@ -2003,7 +2003,7 @@ dependencies = [
[[package]]
name = "common-test-util"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"once_cell",
"rand",
@@ -2012,7 +2012,7 @@ dependencies = [
[[package]]
name = "common-time"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"arrow",
"chrono",
@@ -2027,7 +2027,7 @@ dependencies = [
[[package]]
name = "common-version"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"build-data",
]
@@ -2665,7 +2665,7 @@ dependencies = [
[[package]]
name = "datanode"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arrow-flight",
@@ -2724,7 +2724,7 @@ dependencies = [
"sql",
"storage",
"store-api",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"tokio",
"tokio-stream",
@@ -2738,7 +2738,7 @@ dependencies = [
[[package]]
name = "datatypes"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"arrow",
"arrow-array",
@@ -3201,7 +3201,7 @@ dependencies = [
[[package]]
name = "file-engine"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -3311,7 +3311,7 @@ dependencies = [
[[package]]
name = "frontend"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arc-swap",
@@ -3375,7 +3375,7 @@ dependencies = [
"storage",
"store-api",
"strfmt",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"tokio",
"toml 0.7.6",
@@ -5006,7 +5006,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "log-store"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-stream",
"async-trait",
@@ -5276,7 +5276,7 @@ dependencies = [
[[package]]
name = "meta-client"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -5306,7 +5306,7 @@ dependencies = [
[[package]]
name = "meta-srv"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"anymap",
"api",
@@ -5498,7 +5498,7 @@ dependencies = [
[[package]]
name = "mito2"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"anymap",
"api",
@@ -5960,7 +5960,7 @@ dependencies = [
[[package]]
name = "object-store"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"anyhow",
"async-trait",
@@ -6184,7 +6184,7 @@ dependencies = [
[[package]]
name = "operator"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-compat",
@@ -6229,7 +6229,7 @@ dependencies = [
"sqlparser 0.34.0",
"storage",
"store-api",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"tokio",
"tonic 0.9.2",
@@ -6449,7 +6449,7 @@ dependencies = [
[[package]]
name = "partition"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -6775,7 +6775,7 @@ dependencies = [
[[package]]
name = "plugins"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"auth",
"common-base",
@@ -7025,7 +7025,7 @@ dependencies = [
[[package]]
name = "promql"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-recursion",
"async-trait",
@@ -7287,7 +7287,7 @@ dependencies = [
[[package]]
name = "query"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"ahash 0.8.3",
"api",
@@ -7344,7 +7344,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"tokio",
"tokio-stream",
@@ -8543,7 +8543,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arc-swap",
@@ -8823,7 +8823,7 @@ dependencies = [
[[package]]
name = "servers"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"aide",
"api",
@@ -8917,7 +8917,7 @@ dependencies = [
[[package]]
name = "session"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arc-swap",
@@ -9195,7 +9195,7 @@ dependencies = [
[[package]]
name = "sql"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"common-base",
@@ -9246,7 +9246,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-trait",
"clap 4.4.1",
@@ -9452,7 +9452,7 @@ dependencies = [
[[package]]
name = "storage"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"arc-swap",
@@ -9506,7 +9506,7 @@ dependencies = [
[[package]]
name = "store-api"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"aquamarine",
@@ -9644,7 +9644,7 @@ dependencies = [
[[package]]
name = "substrait"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"async-recursion",
"async-trait",
@@ -9802,7 +9802,7 @@ dependencies = [
[[package]]
name = "table"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"anymap",
"async-trait",
@@ -9908,7 +9908,7 @@ dependencies = [
[[package]]
name = "tests-integration"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"api",
"async-trait",
@@ -9961,7 +9961,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"tempfile",
"tokio",
diff --git a/Cargo.toml b/Cargo.toml
index b55061715bd2..cfb8eb0b06e5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -55,7 +55,7 @@ members = [
resolver = "2"
[workspace.package]
-version = "0.4.0"
+version = "0.4.1"
edition = "2021"
license = "Apache-2.0"
|
ci
|
release new version '0.4.0' -> '0.4.1' (#2611)
|
567f5105bfbe01e71ce6c550346d941a33d84c26
|
2024-07-30 10:25:19
|
shuiyisong
|
fix: missing `pre_write` check on prometheus remote write (#4460)
| false
|
diff --git a/src/frontend/src/instance/prom_store.rs b/src/frontend/src/instance/prom_store.rs
index 6bfd53c41423..20f66ae853fa 100644
--- a/src/frontend/src/instance/prom_store.rs
+++ b/src/frontend/src/instance/prom_store.rs
@@ -170,6 +170,10 @@ impl PromStoreProtocolHandler for Instance {
.as_ref()
.check_permission(ctx.current_user(), PermissionReq::PromStoreWrite)
.context(AuthSnafu)?;
+ let interceptor_ref = self
+ .plugins
+ .get::<PromStoreProtocolInterceptorRef<servers::error::Error>>();
+ interceptor_ref.pre_write(&request, ctx.clone())?;
let output = if with_metric_engine {
let physical_table = ctx
diff --git a/src/servers/src/interceptor.rs b/src/servers/src/interceptor.rs
index 941a424be4f7..e4aceeb4422d 100644
--- a/src/servers/src/interceptor.rs
+++ b/src/servers/src/interceptor.rs
@@ -15,7 +15,7 @@
use std::borrow::Cow;
use std::sync::Arc;
-use api::prom_store::remote::{ReadRequest, WriteRequest};
+use api::prom_store::remote::ReadRequest;
use api::v1::greptime_request::Request;
use api::v1::RowInsertRequests;
use async_trait::async_trait;
@@ -360,7 +360,7 @@ pub trait PromStoreProtocolInterceptor {
fn pre_write(
&self,
- _write_req: &WriteRequest,
+ _write_req: &RowInsertRequests,
_ctx: QueryContextRef,
) -> Result<(), Self::Error> {
Ok(())
@@ -377,7 +377,11 @@ pub type PromStoreProtocolInterceptorRef<E> =
impl<E: ErrorExt> PromStoreProtocolInterceptor for Option<PromStoreProtocolInterceptorRef<E>> {
type Error = E;
- fn pre_write(&self, write_req: &WriteRequest, ctx: QueryContextRef) -> Result<(), Self::Error> {
+ fn pre_write(
+ &self,
+ write_req: &RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<(), Self::Error> {
if let Some(this) = self {
this.pre_write(write_req, ctx)
} else {
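
The call site in `prom_store.rs` can invoke `interceptor_ref.pre_write(...)` without checking for `Some` because the interceptor trait is also implemented for the `Option` of its trait-object reference, turning an absent plugin into a no-op. A stripped-down sketch of that design, with hypothetical names and a plain `String` error in place of the real error type:

```rust
use std::sync::Arc;

trait WriteInterceptor {
    /// Default is a no-op so implementors only override what they need.
    fn pre_write(&self, payload: &str) -> Result<(), String> {
        let _ = payload;
        Ok(())
    }
}

type InterceptorRef = Arc<dyn WriteInterceptor + Send + Sync>;

/// An absent interceptor behaves like a no-op, so callers never branch on Option.
impl WriteInterceptor for Option<InterceptorRef> {
    fn pre_write(&self, payload: &str) -> Result<(), String> {
        match self {
            Some(this) => this.pre_write(payload),
            None => Ok(()),
        }
    }
}

struct RejectEmpty;

impl WriteInterceptor for RejectEmpty {
    fn pre_write(&self, payload: &str) -> Result<(), String> {
        if payload.is_empty() {
            return Err("empty write rejected".to_string());
        }
        Ok(())
    }
}

fn main() {
    let none: Option<InterceptorRef> = None;
    assert!(none.pre_write("rows").is_ok());

    let some: Option<InterceptorRef> = Some(Arc::new(RejectEmpty));
    assert!(some.pre_write("").is_err());
}
```
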
|
fix
|
missing `pre_write` check on prometheus remote write (#4460)
|
7a4276c24a1396bc7a8e6df998799dbecdb838d0
|
2024-11-04 07:26:11
|
dennis zhuang
|
fix: typo (#4931)
| false
|
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index b6e1eb040402..43baf32ec323 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -1355,7 +1355,7 @@ mod tests {
}
#[tokio::test]
- async fn test_region_server_parallism() {
+ async fn test_region_server_parallelism() {
let p = RegionServerParallelism::from_opts(2, Duration::from_millis(1)).unwrap();
let first_query = p.acquire().await;
assert!(first_query.is_ok());
diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs
index 4381dd06a03b..ff80bb64fe6f 100644
--- a/src/flow/src/server.rs
+++ b/src/flow/src/server.rs
@@ -303,7 +303,7 @@ impl FlownodeBuilder {
///
/// or recover all existing flow tasks if in standalone mode(nodeid is None)
///
- /// TODO(discord9): persisent flow tasks with internal state
+ /// TODO(discord9): persistent flow tasks with internal state
async fn recover_flows(&self, manager: &FlowWorkerManagerRef) -> Result<usize, Error> {
let nodeid = self.opts.node_id;
let to_be_recovered: Vec<_> = if let Some(nodeid) = nodeid {
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 8e628772703c..32b69f620517 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -89,7 +89,7 @@ use crate::error::{
};
use crate::manifest::action::RegionEdit;
use crate::metrics::HANDLE_REQUEST_ELAPSED;
-use crate::read::scan_region::{ScanParallism, ScanRegion, Scanner};
+use crate::read::scan_region::{ScanParallelism, ScanRegion, Scanner};
use crate::request::{RegionEditRequest, WorkerRequest};
use crate::wal::entry_distributor::{
build_wal_entry_distributor_and_receivers, DEFAULT_ENTRY_RECEIVER_BUFFER_SIZE,
@@ -417,7 +417,7 @@ impl EngineInner {
let version = region.version();
// Get cache.
let cache_manager = self.workers.cache_manager();
- let scan_parallelism = ScanParallism {
+ let scan_parallelism = ScanParallelism {
parallelism: self.config.scan_parallelism,
channel_size: self.config.parallel_scan_channel_size,
};
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index 6e2a0344338b..1a7fb29b2e56 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -169,7 +169,7 @@ pub(crate) struct ScanRegion {
/// Cache.
cache_manager: Option<CacheManagerRef>,
/// Parallelism to scan.
- parallelism: ScanParallism,
+ parallelism: ScanParallelism,
/// Whether to ignore inverted index.
ignore_inverted_index: bool,
/// Whether to ignore fulltext index.
@@ -191,7 +191,7 @@ impl ScanRegion {
access_layer,
request,
cache_manager,
- parallelism: ScanParallism::default(),
+ parallelism: ScanParallelism::default(),
ignore_inverted_index: false,
ignore_fulltext_index: false,
start_time: None,
@@ -200,7 +200,7 @@ impl ScanRegion {
/// Sets parallelism.
#[must_use]
- pub(crate) fn with_parallelism(mut self, parallelism: ScanParallism) -> Self {
+ pub(crate) fn with_parallelism(mut self, parallelism: ScanParallelism) -> Self {
self.parallelism = parallelism;
self
}
@@ -447,7 +447,7 @@ impl ScanRegion {
/// Config for parallel scan.
#[derive(Debug, Clone, Copy, Default)]
-pub(crate) struct ScanParallism {
+pub(crate) struct ScanParallelism {
/// Number of tasks expect to spawn to read data.
pub(crate) parallelism: usize,
/// Channel size to send batches. Only takes effect when the parallelism > 1.
@@ -484,7 +484,7 @@ pub(crate) struct ScanInput {
/// Ignores file not found error.
ignore_file_not_found: bool,
/// Parallelism to scan data.
- pub(crate) parallelism: ScanParallism,
+ pub(crate) parallelism: ScanParallelism,
/// Index appliers.
inverted_index_applier: Option<InvertedIndexApplierRef>,
fulltext_index_applier: Option<FulltextIndexApplierRef>,
@@ -513,7 +513,7 @@ impl ScanInput {
files: Vec::new(),
cache_manager: None,
ignore_file_not_found: false,
- parallelism: ScanParallism::default(),
+ parallelism: ScanParallelism::default(),
inverted_index_applier: None,
fulltext_index_applier: None,
query_start: None,
@@ -568,7 +568,7 @@ impl ScanInput {
/// Sets scan parallelism.
#[must_use]
- pub(crate) fn with_parallelism(mut self, parallelism: ScanParallism) -> Self {
+ pub(crate) fn with_parallelism(mut self, parallelism: ScanParallelism) -> Self {
self.parallelism = parallelism;
self
}
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index 71733dcc1288..ae4ac70ed6e5 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -302,7 +302,7 @@ mod test {
let result = check(query, QueryContext::arc(), session.clone());
assert!(result.is_none());
- let query = "select versiona";
+ let query = "select version";
let output = check(query, QueryContext::arc(), session.clone());
assert!(output.is_none());
|
fix
|
typo (#4931)
|
f02dc0e27420590351615eac1db6aa1ffaa8b79e
|
2023-11-09 18:14:50
|
Ruihang Xia
|
feat: run dist planner after simplify expression rule (#2723)
| false
|
diff --git a/src/query/src/dist_plan/analyzer.rs b/src/query/src/dist_plan/analyzer.rs
index 06aee5844d45..a7f4fa5b4c26 100644
--- a/src/query/src/dist_plan/analyzer.rs
+++ b/src/query/src/dist_plan/analyzer.rs
@@ -21,6 +21,8 @@ use datafusion_common::tree_node::{RewriteRecursion, Transformed, TreeNode, Tree
use datafusion_expr::expr::{Exists, InSubquery};
use datafusion_expr::{col, Expr, LogicalPlan, LogicalPlanBuilder, Subquery};
use datafusion_optimizer::analyzer::AnalyzerRule;
+use datafusion_optimizer::simplify_expressions::SimplifyExpressions;
+use datafusion_optimizer::{OptimizerContext, OptimizerRule};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::metadata::TableType;
use table::table::adapter::DfTableProviderAdapter;
@@ -42,6 +44,12 @@ impl AnalyzerRule for DistPlannerAnalyzer {
plan: LogicalPlan,
_config: &ConfigOptions,
) -> datafusion_common::Result<LogicalPlan> {
+ // preprocess the input plan
+ let optimizer_context = OptimizerContext::new();
+ let plan = SimplifyExpressions::new()
+ .try_optimize(&plan, &optimizer_context)?
+ .unwrap_or(plan);
+
let plan = plan.transform(&Self::inspect_plan_with_subquery)?;
let mut rewriter = PlanRewriter::default();
let result = plan.rewrite(&mut rewriter)?;
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index b5dc35b3e9b4..c80ecadc9fb8 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -80,13 +80,13 @@ impl QueryEngineState {
let session_config = SessionConfig::new().with_create_default_catalog_and_schema(false);
// Apply the type conversion rule first.
let mut analyzer = Analyzer::new();
- if with_dist_planner {
- analyzer.rules.insert(0, Arc::new(DistPlannerAnalyzer));
- }
analyzer.rules.insert(0, Arc::new(TypeConversionRule));
analyzer.rules.insert(0, Arc::new(StringNormalizationRule));
Self::remove_analyzer_rule(&mut analyzer.rules, CountWildcardRule {}.name());
analyzer.rules.insert(0, Arc::new(CountWildcardRule {}));
+ if with_dist_planner {
+ analyzer.rules.push(Arc::new(DistPlannerAnalyzer));
+ }
let mut optimizer = Optimizer::new();
optimizer.rules.push(Arc::new(OrderHintRule));
|
feat
|
run dist planner after simplify expression rule (#2723)
|
768239eb49c18b5bf7d293400510547c0bb7d1bf
|
2023-08-14 19:50:20
|
Yingwen
|
fix: panic on truncate table in distributed mode (#2173)
| false
|
diff --git a/src/meta-srv/src/ddl.rs b/src/meta-srv/src/ddl.rs
index ad3ee5e2b6f7..3c4b0a5828f3 100644
--- a/src/meta-srv/src/ddl.rs
+++ b/src/meta-srv/src/ddl.rs
@@ -19,11 +19,15 @@ use common_meta::key::TableMetadataManagerRef;
use common_meta::rpc::ddl::{AlterTableTask, CreateTableTask, DropTableTask, TruncateTableTask};
use common_meta::rpc::router::TableRoute;
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
+use common_telemetry::error;
use snafu::ResultExt;
use table::metadata::RawTableInfo;
use table::requests::AlterTableRequest;
-use crate::error::{self, Result};
+use crate::error::{
+ RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu, UnsupportedSnafu,
+ WaitProcedureSnafu,
+};
use crate::procedure::alter_table::AlterTableProcedure;
use crate::procedure::create_table::CreateTableProcedure;
use crate::procedure::drop_table::DropTableProcedure;
@@ -91,7 +95,7 @@ impl DdlManager {
CreateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
- .context(error::RegisterProcedureLoaderSnafu {
+ .context(RegisterProcedureLoaderSnafu {
type_name: CreateTableProcedure::TYPE_NAME,
})?;
@@ -105,7 +109,7 @@ impl DdlManager {
DropTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
- .context(error::RegisterProcedureLoaderSnafu {
+ .context(RegisterProcedureLoaderSnafu {
type_name: DropTableProcedure::TYPE_NAME,
})?;
@@ -119,7 +123,7 @@ impl DdlManager {
AlterTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
- .context(error::RegisterProcedureLoaderSnafu {
+ .context(RegisterProcedureLoaderSnafu {
type_name: AlterTableProcedure::TYPE_NAME,
})
}
@@ -183,8 +187,13 @@ impl DdlManager {
truncate_table_task: TruncateTableTask,
table_route: TableRoute,
) -> Result<ProcedureId> {
- todo!("implement truncate table procedure, cluster_id = {}, truncate_table_task = {:?}, table_route = {:?}",
+ error!("truncate table procedure is not supported, cluster_id = {}, truncate_table_task = {:?}, table_route = {:?}",
cluster_id, truncate_table_task, table_route);
+
+ UnsupportedSnafu {
+ operation: "TRUNCATE TABLE",
+ }
+ .fail()
}
async fn submit_procedure(&self, procedure_with_id: ProcedureWithId) -> Result<ProcedureId> {
@@ -194,11 +203,11 @@ impl DdlManager {
.procedure_manager
.submit(procedure_with_id)
.await
- .context(error::SubmitProcedureSnafu)?;
+ .context(SubmitProcedureSnafu)?;
watcher::wait(&mut watcher)
.await
- .context(error::WaitProcedureSnafu)?;
+ .context(WaitProcedureSnafu)?;
Ok(procedure_id)
}
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index e0c7a1753015..bde755372934 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -489,6 +489,12 @@ pub enum Error {
#[snafu(display("Too many partitions, location: {}", location))]
TooManyPartitions { location: Location },
+
+ #[snafu(display("Unsupported operation {}, location: {}", operation, location))]
+ Unsupported {
+ operation: String,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -535,7 +541,8 @@ impl ErrorExt for Error {
| Error::NoEnoughAvailableDatanode { .. }
| Error::ConvertGrpcExpr { .. }
| Error::PublishMessage { .. }
- | Error::Join { .. } => StatusCode::Internal,
+ | Error::Join { .. }
+ | Error::Unsupported { .. } => StatusCode::Internal,
Error::EmptyKey { .. }
| Error::MissingRequiredParameter { .. }
| Error::MissingRequestHeader { .. }
|
fix
|
panic on truncate table in distributed mode (#2173)
|
83643eb195e22d350278f82ee4373048ac25d464
|
2024-03-27 07:24:41
|
JohnsonLee
|
feat: Support printing postgresql's `bytea` data type in its "hex" and "escape" format (#3567)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index cb732d1ab0cb..2cd6d46c46bb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9030,6 +9030,7 @@ dependencies = [
"common-time",
"common-version",
"criterion",
+ "dashmap",
"datafusion",
"datafusion-common",
"datatypes",
@@ -9103,8 +9104,12 @@ dependencies = [
"arc-swap",
"auth",
"common-catalog",
+ "common-error",
+ "common-macro",
+ "common-telemetry",
"common-time",
"derive_builder 0.12.0",
+ "snafu",
"sql",
]
diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index 731fbe288b3e..43bd75865b78 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -20,9 +20,9 @@ use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use datafusion::parquet;
use datatypes::arrow::error::ArrowError;
-use datatypes::value::Value;
use servers::define_into_tonic_status;
use snafu::{Location, Snafu};
+use sql::ast::Value;
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -528,6 +528,12 @@ pub enum Error {
#[snafu(display("Invalid partition rule: {}", reason))]
InvalidPartitionRule { reason: String, location: Location },
+
+ #[snafu(display("Invalid configuration value."))]
+ InvalidConfigValue {
+ source: session::session_config::Error,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -536,6 +542,7 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::InvalidSql { .. }
+ | Error::InvalidConfigValue { .. }
| Error::InvalidInsertRequest { .. }
| Error::InvalidDeleteRequest { .. }
| Error::IllegalPrimaryKeysDef { .. }
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 1a30c596ce8b..2fb9267f7918 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -39,6 +39,7 @@ use query::parser::QueryStatement;
use query::plan::LogicalPlan;
use query::QueryEngineRef;
use session::context::QueryContextRef;
+use session::session_config::PGByteaOutputValue;
use session::table_name::table_idents_to_full_name;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::copy::{CopyDatabase, CopyDatabaseArgument, CopyTable, CopyTableArgument};
@@ -52,8 +53,8 @@ use table::table_reference::TableReference;
use table::TableRef;
use crate::error::{
- self, CatalogSnafu, ExecLogicalPlanSnafu, ExternalSnafu, InvalidSqlSnafu, NotSupportedSnafu,
- PlanStatementSnafu, Result, TableNotFoundSnafu,
+ self, CatalogSnafu, ExecLogicalPlanSnafu, ExternalSnafu, InvalidConfigValueSnafu,
+ InvalidSqlSnafu, NotSupportedSnafu, PlanStatementSnafu, Result, TableNotFoundSnafu,
};
use crate::insert::InserterRef;
use crate::statement::copy_database::{COPY_DATABASE_TIME_END_KEY, COPY_DATABASE_TIME_START_KEY};
@@ -219,8 +220,7 @@ impl StatementExecutor {
// so we just ignore it here instead of returning an error to break the connection.
// Since the "bytea_output" only determines the output format of binary values,
// it won't cause much trouble if we do so.
- // TODO(#3438): Remove this temporary workaround after the feature is implemented.
- "BYTEA_OUTPUT" => (),
+ "BYTEA_OUTPUT" => set_bytea_output(set_var.value, query_ctx)?,
// Same as "bytea_output", we just ignore it here.
// Not harmful since it only relates to how date is viewed in client app's output.
@@ -339,6 +339,25 @@ fn set_timezone(exprs: Vec<Expr>, ctx: QueryContextRef) -> Result<()> {
}
}
+fn set_bytea_output(exprs: Vec<Expr>, ctx: QueryContextRef) -> Result<()> {
+ let Some((var_value, [])) = exprs.split_first() else {
+ return (NotSupportedSnafu {
+ feat: "Set variable value must have one and only one value for bytea_output",
+ })
+ .fail();
+ };
+ let Expr::Value(value) = var_value else {
+ return (NotSupportedSnafu {
+ feat: "Set variable value must be a value",
+ })
+ .fail();
+ };
+ ctx.configuration_parameter().set_postgres_bytea_output(
+ PGByteaOutputValue::try_from(value.clone()).context(InvalidConfigValueSnafu)?,
+ );
+ Ok(())
+}
+
fn to_copy_table_request(stmt: CopyTable, query_ctx: QueryContextRef) -> Result<CopyTableRequest> {
let direction = match stmt {
CopyTable::To(_) => CopyDirection::Export,
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 43dbc55703b6..5cfbdd546221 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -41,6 +41,7 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+dashmap.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datatypes.workspace = true
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index a9c9d630b8cc..d9f16bd654a1 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -310,7 +310,7 @@ mod test {
#[test]
fn test_check() {
- let session = Arc::new(Session::new(None, Channel::Mysql));
+ let session = Arc::new(Session::new(None, Channel::Mysql, Default::default()));
let query = "select 1";
let result = check(query, QueryContext::arc(), session.clone());
assert!(result.is_none());
@@ -320,7 +320,7 @@ mod test {
assert!(output.is_none());
fn test(query: &str, expected: &str) {
- let session = Arc::new(Session::new(None, Channel::Mysql));
+ let session = Arc::new(Session::new(None, Channel::Mysql, Default::default()));
let output = check(query, QueryContext::arc(), session.clone());
match output.unwrap().data {
OutputData::RecordBatches(r) => {
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 8c1814580fc7..9fe088cb6604 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -85,7 +85,11 @@ impl MysqlInstanceShim {
MysqlInstanceShim {
query_handler,
salt: scramble,
- session: Arc::new(Session::new(Some(client_addr), Channel::Mysql)),
+ session: Arc::new(Session::new(
+ Some(client_addr),
+ Channel::Mysql,
+ Default::default(),
+ )),
user_provider,
prepared_stmts: Default::default(),
prepared_stmts_counter: AtomicU32::new(1),
diff --git a/src/servers/src/postgres.rs b/src/servers/src/postgres.rs
index 0836ea51bb21..c6e10ad8dbee 100644
--- a/src/servers/src/postgres.rs
+++ b/src/servers/src/postgres.rs
@@ -88,7 +88,7 @@ pub(crate) struct MakePostgresServerHandler {
impl MakePostgresServerHandler {
fn make(&self, addr: Option<SocketAddr>) -> PostgresServerHandler {
- let session = Arc::new(Session::new(addr, Channel::Postgres));
+ let session = Arc::new(Session::new(addr, Channel::Postgres, Default::default()));
PostgresServerHandler {
query_handler: self.query_handler.clone(),
login_verifier: PgLoginVerifier::new(self.user_provider.clone()),
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index 711c2b30dbce..bb3db5bc9cc0 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -31,6 +31,7 @@ use pgwire::api::stmt::{QueryParser, StoredStatement};
use pgwire::api::{ClientInfo, Type};
use pgwire::error::{ErrorInfo, PgWireError, PgWireResult};
use query::query_engine::DescribeResult;
+use session::context::QueryContextRef;
use session::Session;
use sql::dialect::PostgreSqlDialect;
use sql::parser::{ParseOptions, ParserContext};
@@ -63,7 +64,7 @@ impl SimpleQueryHandler for PostgresServerHandler {
let mut results = Vec::with_capacity(outputs.len());
for output in outputs {
- let resp = output_to_query_response(output, &Format::UnifiedText)?;
+ let resp = output_to_query_response(query_ctx.clone(), output, &Format::UnifiedText)?;
results.push(resp);
}
@@ -72,6 +73,7 @@ impl SimpleQueryHandler for PostgresServerHandler {
}
fn output_to_query_response<'a>(
+ query_ctx: QueryContextRef,
output: Result<Output>,
field_format: &Format,
) -> PgWireResult<Response<'a>> {
@@ -82,11 +84,16 @@ fn output_to_query_response<'a>(
}
OutputData::Stream(record_stream) => {
let schema = record_stream.schema();
- recordbatches_to_query_response(record_stream, schema, field_format)
+ recordbatches_to_query_response(query_ctx, record_stream, schema, field_format)
}
OutputData::RecordBatches(recordbatches) => {
let schema = recordbatches.schema();
- recordbatches_to_query_response(recordbatches.as_stream(), schema, field_format)
+ recordbatches_to_query_response(
+ query_ctx,
+ recordbatches.as_stream(),
+ schema,
+ field_format,
+ )
}
},
Err(e) => Ok(Response::Error(Box::new(ErrorInfo::new(
@@ -98,6 +105,7 @@ fn output_to_query_response<'a>(
}
fn recordbatches_to_query_response<'a, S>(
+ query_ctx: QueryContextRef,
recordbatches_stream: S,
schema: SchemaRef,
field_format: &Format,
@@ -125,7 +133,7 @@ where
row.and_then(|row| {
let mut encoder = DataRowEncoder::new(pg_schema_ref.clone());
for value in row.iter() {
- encode_value(value, &mut encoder)?;
+ encode_value(&query_ctx, value, &mut encoder)?;
}
encoder.finish()
})
@@ -224,7 +232,9 @@ impl ExtendedQueryHandler for PostgresServerHandler {
let plan = plan
.replace_params_with_values(parameters_to_scalar_values(plan, portal)?.as_ref())
.map_err(|e| PgWireError::ApiError(Box::new(e)))?;
- self.query_handler.do_exec_plan(plan, query_ctx).await
+ self.query_handler
+ .do_exec_plan(plan, query_ctx.clone())
+ .await
} else {
// manually replace variables in prepared statement when no
// logical_plan is generated. This happens when logical plan is not
@@ -234,10 +244,13 @@ impl ExtendedQueryHandler for PostgresServerHandler {
sql = sql.replace(&format!("${}", i + 1), &parameter_to_string(portal, i)?);
}
- self.query_handler.do_query(&sql, query_ctx).await.remove(0)
+ self.query_handler
+ .do_query(&sql, query_ctx.clone())
+ .await
+ .remove(0)
};
- output_to_query_response(output, &portal.result_column_format)
+ output_to_query_response(query_ctx, output, &portal.result_column_format)
}
async fn do_describe_statement<C>(
diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs
index d863c4d4e0a8..01351f85419b 100644
--- a/src/servers/src/postgres/types.rs
+++ b/src/servers/src/postgres/types.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod bytea;
mod interval;
use std::collections::HashMap;
@@ -28,7 +29,10 @@ use pgwire::api::results::{DataRowEncoder, FieldInfo};
use pgwire::api::Type;
use pgwire::error::{ErrorInfo, PgWireError, PgWireResult};
use query::plan::LogicalPlan;
+use session::context::QueryContextRef;
+use session::session_config::PGByteaOutputValue;
+use self::bytea::{EscapeOutputBytea, HexOutputBytea};
use self::interval::PgInterval;
use crate::error::{self, Error, Result};
use crate::SqlPlan;
@@ -50,7 +54,11 @@ pub(super) fn schema_to_pg(origin: &Schema, field_formats: &Format) -> Result<Ve
.collect::<Result<Vec<FieldInfo>>>()
}
-pub(super) fn encode_value(value: &Value, builder: &mut DataRowEncoder) -> PgWireResult<()> {
+pub(super) fn encode_value(
+ query_ctx: &QueryContextRef,
+ value: &Value,
+ builder: &mut DataRowEncoder,
+) -> PgWireResult<()> {
match value {
Value::Null => builder.encode_field(&None::<&i8>),
Value::Boolean(v) => builder.encode_field(v),
@@ -65,7 +73,13 @@ pub(super) fn encode_value(value: &Value, builder: &mut DataRowEncoder) -> PgWir
Value::Float32(v) => builder.encode_field(&v.0),
Value::Float64(v) => builder.encode_field(&v.0),
Value::String(v) => builder.encode_field(&v.as_utf8()),
- Value::Binary(v) => builder.encode_field(&v.deref()),
+ Value::Binary(v) => {
+ let bytea_output = query_ctx.configuration_parameter().postgres_bytea_output();
+ match *bytea_output {
+ PGByteaOutputValue::ESCAPE => builder.encode_field(&EscapeOutputBytea(v.deref())),
+ PGByteaOutputValue::HEX => builder.encode_field(&HexOutputBytea(v.deref())),
+ }
+ }
Value::Date(v) => {
if let Some(date) = v.to_chrono_date() {
builder.encode_field(&date)
@@ -563,6 +577,7 @@ mod test {
use datatypes::value::ListValue;
use pgwire::api::results::{FieldFormat, FieldInfo};
use pgwire::api::Type;
+ use session::context::QueryContextBuilder;
use super::*;
@@ -784,12 +799,16 @@ mod test {
Value::Timestamp(1000001i64.into()),
Value::Interval(1000001i128.into()),
];
+ let query_context = QueryContextBuilder::default()
+ .configuration_parameter(Default::default())
+ .build();
let mut builder = DataRowEncoder::new(Arc::new(schema));
for i in values.iter() {
- encode_value(i, &mut builder).unwrap();
+ encode_value(&query_context, i, &mut builder).unwrap();
}
let err = encode_value(
+ &query_context,
&Value::List(ListValue::new(
Some(Box::default()),
ConcreteDataType::int16_datatype(),
diff --git a/src/servers/src/postgres/types/bytea.rs b/src/servers/src/postgres/types/bytea.rs
new file mode 100644
index 000000000000..975d670f9c00
--- /dev/null
+++ b/src/servers/src/postgres/types/bytea.rs
@@ -0,0 +1,152 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use bytes::BufMut;
+use pgwire::types::ToSqlText;
+use postgres_types::{IsNull, ToSql, Type};
+
+#[derive(Debug)]
+pub struct HexOutputBytea<'a>(pub &'a [u8]);
+impl ToSqlText for HexOutputBytea<'_> {
+ fn to_sql_text(
+ &self,
+ ty: &Type,
+ out: &mut bytes::BytesMut,
+ ) -> std::result::Result<IsNull, Box<dyn std::error::Error + Sync + Send>>
+ where
+ Self: Sized,
+ {
+ out.put_slice(b"\\x");
+ let _ = self.0.to_sql_text(ty, out);
+ Ok(IsNull::No)
+ }
+}
+
+impl ToSql for HexOutputBytea<'_> {
+ fn to_sql(
+ &self,
+ ty: &Type,
+ out: &mut bytes::BytesMut,
+ ) -> std::result::Result<IsNull, Box<dyn std::error::Error + Sync + Send>>
+ where
+ Self: Sized,
+ {
+ self.0.to_sql(ty, out)
+ }
+
+ fn accepts(ty: &Type) -> bool
+ where
+ Self: Sized,
+ {
+ <&[u8] as ToSql>::accepts(ty)
+ }
+
+ fn to_sql_checked(
+ &self,
+ ty: &Type,
+ out: &mut bytes::BytesMut,
+ ) -> std::result::Result<IsNull, Box<dyn std::error::Error + Sync + Send>> {
+ self.0.to_sql_checked(ty, out)
+ }
+}
+#[derive(Debug)]
+pub struct EscapeOutputBytea<'a>(pub &'a [u8]);
+impl ToSqlText for EscapeOutputBytea<'_> {
+ fn to_sql_text(
+ &self,
+ _ty: &Type,
+ out: &mut bytes::BytesMut,
+ ) -> std::result::Result<IsNull, Box<dyn std::error::Error + Sync + Send>>
+ where
+ Self: Sized,
+ {
+ self.0.iter().for_each(|b| match b {
+ 0..=31 | 127..=255 => {
+ out.put_slice(b"\\");
+ out.put_slice(format!("{:03o}", b).as_bytes());
+ }
+ 92 => out.put_slice(b"\\\\"),
+ 32..=126 => out.put_u8(*b),
+ });
+ Ok(IsNull::No)
+ }
+}
+impl ToSql for EscapeOutputBytea<'_> {
+ fn to_sql(
+ &self,
+ ty: &Type,
+ out: &mut bytes::BytesMut,
+ ) -> std::result::Result<IsNull, Box<dyn std::error::Error + Sync + Send>>
+ where
+ Self: Sized,
+ {
+ self.0.to_sql(ty, out)
+ }
+
+ fn accepts(ty: &Type) -> bool
+ where
+ Self: Sized,
+ {
+ <&[u8] as ToSql>::accepts(ty)
+ }
+
+ fn to_sql_checked(
+ &self,
+ ty: &Type,
+ out: &mut bytes::BytesMut,
+ ) -> std::result::Result<IsNull, Box<dyn std::error::Error + Sync + Send>> {
+ self.0.to_sql_checked(ty, out)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_escape_output_bytea() {
+ let input: &[u8] = &[97, 98, 99, 107, 108, 109, 42, 169, 84];
+ let input = EscapeOutputBytea(input);
+
+ let expected = b"abcklm*\\251T";
+ let mut out = bytes::BytesMut::new();
+ let is_null = input.to_sql_text(&Type::BYTEA, &mut out).unwrap();
+ assert!(matches!(is_null, IsNull::No));
+ assert_eq!(&out[..], expected);
+
+ let expected = &[97, 98, 99, 107, 108, 109, 42, 169, 84];
+ let mut out = bytes::BytesMut::new();
+ let is_null = input.to_sql(&Type::BYTEA, &mut out).unwrap();
+ assert!(matches!(is_null, IsNull::No));
+ assert_eq!(&out[..], expected);
+ }
+
+ #[test]
+ fn test_hex_output_bytea() {
+ let input = b"hello, world!";
+ let input = HexOutputBytea(input);
+
+ let expected = b"\\x68656c6c6f2c20776f726c6421";
+ let mut out = bytes::BytesMut::new();
+ let is_null = input.to_sql_text(&Type::BYTEA, &mut out).unwrap();
+ assert!(matches!(is_null, IsNull::No));
+ assert_eq!(&out[..], expected);
+
+ let expected = b"hello, world!";
+ let mut out = bytes::BytesMut::new();
+ let is_null = input.to_sql(&Type::BYTEA, &mut out).unwrap();
+ assert!(matches!(is_null, IsNull::No));
+ assert_eq!(&out[..], expected);
+ }
+}
diff --git a/src/session/Cargo.toml b/src/session/Cargo.toml
index 85697d4ca7c9..8e0baeaa0f2d 100644
--- a/src/session/Cargo.toml
+++ b/src/session/Cargo.toml
@@ -15,6 +15,10 @@ api.workspace = true
arc-swap = "1.5"
auth.workspace = true
common-catalog.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+common-telemetry.workspace = true
common-time.workspace = true
derive_builder.workspace = true
+snafu.workspace = true
sql.workspace = true
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index cc41af37445b..d401b0331637 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -27,6 +27,7 @@ use common_time::Timezone;
use derive_builder::Builder;
use sql::dialect::{Dialect, GreptimeDbDialect, MySqlDialect, PostgreSqlDialect};
+use crate::session_config::PGByteaOutputValue;
use crate::SessionRef;
pub type QueryContextRef = Arc<QueryContext>;
@@ -44,6 +45,9 @@ pub struct QueryContext {
sql_dialect: Arc<dyn Dialect + Send + Sync>,
#[builder(default)]
extension: HashMap<String, String>,
+ // The configuration parameter are used to store the parameters that are set by the user
+ #[builder(default)]
+ configuration_parameter: Arc<ConfigurationVariables>,
}
impl QueryContextBuilder {
@@ -73,6 +77,7 @@ impl Clone for QueryContext {
timezone: self.timezone.load().clone().into(),
sql_dialect: self.sql_dialect.clone(),
extension: self.extension.clone(),
+ configuration_parameter: self.configuration_parameter.clone(),
}
}
}
@@ -88,6 +93,7 @@ impl From<&RegionRequestHeader> for QueryContext {
timezone: ArcSwap::new(Arc::new(get_timezone(None).clone())),
sql_dialect: Arc::new(GreptimeDbDialect {}),
extension: Default::default(),
+ configuration_parameter: Default::default(),
}
}
}
@@ -183,6 +189,10 @@ impl QueryContext {
'`'
}
}
+
+ pub fn configuration_parameter(&self) -> &ConfigurationVariables {
+ &self.configuration_parameter
+ }
}
impl QueryContextBuilder {
@@ -204,6 +214,7 @@ impl QueryContextBuilder {
.sql_dialect
.unwrap_or_else(|| Arc::new(GreptimeDbDialect {})),
extension: self.extension.unwrap_or_default(),
+ configuration_parameter: self.configuration_parameter.unwrap_or_default(),
})
}
@@ -268,6 +279,33 @@ impl Display for Channel {
}
}
+#[derive(Default, Debug)]
+pub struct ConfigurationVariables {
+ postgres_bytea_output: ArcSwap<PGByteaOutputValue>,
+}
+
+impl Clone for ConfigurationVariables {
+ fn clone(&self) -> Self {
+ Self {
+ postgres_bytea_output: ArcSwap::new(self.postgres_bytea_output.load().clone()),
+ }
+ }
+}
+
+impl ConfigurationVariables {
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ pub fn set_postgres_bytea_output(&self, value: PGByteaOutputValue) {
+ let _ = self.postgres_bytea_output.swap(Arc::new(value));
+ }
+
+ pub fn postgres_bytea_output(&self) -> Arc<PGByteaOutputValue> {
+ self.postgres_bytea_output.load().clone()
+ }
+}
+
#[cfg(test)]
mod test {
use common_catalog::consts::DEFAULT_CATALOG_NAME;
@@ -278,7 +316,11 @@ mod test {
#[test]
fn test_session() {
- let session = Session::new(Some("127.0.0.1:9000".parse().unwrap()), Channel::Mysql);
+ let session = Session::new(
+ Some("127.0.0.1:9000".parse().unwrap()),
+ Channel::Mysql,
+ Default::default(),
+ );
// test user_info
assert_eq!(session.user_info().username(), "greptime");
diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs
index b51511ce6dfd..e89a733553d9 100644
--- a/src/session/src/lib.rs
+++ b/src/session/src/lib.rs
@@ -13,6 +13,7 @@
// limitations under the License.
pub mod context;
+pub mod session_config;
pub mod table_name;
use std::net::SocketAddr;
@@ -24,7 +25,7 @@ use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_time::timezone::get_timezone;
use common_time::Timezone;
-use context::QueryContextBuilder;
+use context::{ConfigurationVariables, QueryContextBuilder};
use crate::context::{Channel, ConnInfo, QueryContextRef};
@@ -36,18 +37,24 @@ pub struct Session {
user_info: ArcSwap<UserInfoRef>,
conn_info: ConnInfo,
timezone: ArcSwap<Timezone>,
+ configuration_variables: Arc<ConfigurationVariables>,
}
pub type SessionRef = Arc<Session>;
impl Session {
- pub fn new(addr: Option<SocketAddr>, channel: Channel) -> Self {
+ pub fn new(
+ addr: Option<SocketAddr>,
+ channel: Channel,
+ configuration_variables: ConfigurationVariables,
+ ) -> Self {
Session {
catalog: ArcSwap::new(Arc::new(DEFAULT_CATALOG_NAME.into())),
schema: ArcSwap::new(Arc::new(DEFAULT_SCHEMA_NAME.into())),
user_info: ArcSwap::new(Arc::new(auth::userinfo_by_name(None))),
conn_info: ConnInfo::new(addr, channel),
timezone: ArcSwap::new(Arc::new(get_timezone(None).clone())),
+ configuration_variables: Arc::new(configuration_variables),
}
}
@@ -60,6 +67,7 @@ impl Session {
.current_catalog(self.catalog.load().to_string())
.current_schema(self.schema.load().to_string())
.sql_dialect(self.conn_info.channel.dialect())
+ .configuration_parameter(self.configuration_variables.clone())
.timezone(self.timezone())
.build()
}
diff --git a/src/session/src/session_config.rs b/src/session/src/session_config.rs
new file mode 100644
index 000000000000..aad50e70c1ac
--- /dev/null
+++ b/src/session/src/session_config.rs
@@ -0,0 +1,64 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_macro::stack_trace_debug;
+use snafu::{Location, Snafu};
+use sql::ast::Value;
+
+#[derive(Snafu)]
+#[snafu(visibility(pub))]
+#[stack_trace_debug]
+pub enum Error {
+ #[snafu(display("Invalid value for parameter \"{}\": {}\nHint: {}", name, value, hint,))]
+ InvalidConfigValue {
+ name: String,
+ value: String,
+ hint: String,
+ location: Location,
+ },
+}
+
+#[derive(Clone, Copy, Debug, Default)]
+pub enum PGByteaOutputValue {
+ #[default]
+ HEX,
+ ESCAPE,
+}
+
+impl TryFrom<Value> for PGByteaOutputValue {
+ type Error = Error;
+
+ fn try_from(value: Value) -> Result<Self, Self::Error> {
+ match &value {
+ Value::DoubleQuotedString(s) | Value::SingleQuotedString(s) => {
+ match s.to_uppercase().as_str() {
+ "ESCAPE" => Ok(PGByteaOutputValue::ESCAPE),
+ "HEX" => Ok(PGByteaOutputValue::HEX),
+ _ => InvalidConfigValueSnafu {
+ name: "BYTEA_OUTPUT",
+ value: value.to_string(),
+ hint: "Available values: escape, hex",
+ }
+ .fail(),
+ }
+ }
+ _ => InvalidConfigValueSnafu {
+ name: "BYTEA_OUTPUT",
+ value: value.to_string(),
+ hint: "Available values: escape, hex",
+ }
+ .fail(),
+ }
+ }
+}
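
A note on the session plumbing above: the settable parameter lives in an Arc<ConfigurationVariables> owned by the Session and shared with every QueryContext it builds, and arc-swap gives lock-free reads plus atomic updates when a SET statement arrives. Below is a minimal sketch of that pattern, assuming only the arc-swap crate already used in this diff; BytesFormat and SessionConfig are hypothetical stand-ins for PGByteaOutputValue and ConfigurationVariables, not the real types.

use std::sync::Arc;

use arc_swap::ArcSwap;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum BytesFormat {
    #[default]
    Hex,
    Escape,
}

// Stand-in for ConfigurationVariables: one swappable value behind an ArcSwap.
#[derive(Default)]
struct SessionConfig {
    bytea_output: ArcSwap<BytesFormat>,
}

fn main() {
    let config = Arc::new(SessionConfig::default());
    // A QueryContext built from the session would hold a clone of the same Arc.
    let ctx_view = Arc::clone(&config);

    assert_eq!(**ctx_view.bytea_output.load(), BytesFormat::Hex);
    // Handling `SET bytea_output='escape'` swaps the value atomically...
    config.bytea_output.store(Arc::new(BytesFormat::Escape));
    // ...and readers going through the shared Arc observe the new value immediately.
    assert_eq!(**ctx_view.bytea_output.load(), BytesFormat::Escape);
}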
diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs
index 7cff590c39eb..4628bc372f3d 100644
--- a/tests-integration/tests/sql.rs
+++ b/tests-integration/tests/sql.rs
@@ -60,6 +60,7 @@ macro_rules! sql_tests {
test_postgres_auth,
test_postgres_crud,
test_postgres_timezone,
+ test_postgres_bytea,
test_postgres_parameter_inference,
test_mysql_prepare_stmt_insert_timestamp,
);
@@ -415,7 +416,69 @@ pub async fn test_postgres_crud(store_type: StorageType) {
let _ = fe_pg_server.shutdown().await;
guard.remove_all().await;
}
+pub async fn test_postgres_bytea(store_type: StorageType) {
+ let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_bytea_output").await;
+ let (client, connection) = tokio_postgres::connect(&format!("postgres://{addr}/public"), NoTls)
+ .await
+ .unwrap();
+ tokio::spawn(async move {
+ connection.await.unwrap();
+ });
+ let _ = client
+ .simple_query("CREATE TABLE test(b BLOB, ts TIMESTAMP TIME INDEX)")
+ .await
+ .unwrap();
+ let _ = client
+ .simple_query("INSERT INTO test VALUES(X'6162636b6c6d2aa954', 0)")
+ .await
+ .unwrap();
+ let get_row = |mess: Vec<SimpleQueryMessage>| -> String {
+ match &mess[0] {
+ SimpleQueryMessage::Row(row) => row.get(0).unwrap().to_string(),
+ _ => unreachable!(),
+ }
+ };
+
+ let r = client.simple_query("SELECT b FROM test").await.unwrap();
+ let b = get_row(r);
+ assert_eq!(b, "\\x6162636b6c6d2aa954");
+
+ let _ = client.simple_query("SET bytea_output='hex'").await.unwrap();
+ let r = client.simple_query("SELECT b FROM test").await.unwrap();
+ let b = get_row(r);
+ assert_eq!(b, "\\x6162636b6c6d2aa954");
+
+ let _ = client
+ .simple_query("SET bytea_output='escape'")
+ .await
+ .unwrap();
+ let r = client.simple_query("SELECT b FROM test").await.unwrap();
+ let b = get_row(r);
+ assert_eq!(b, "abcklm*\\251T");
+
+ let _e = client
+ .simple_query("SET bytea_output='invalid'")
+ .await
+ .unwrap_err();
+
+ // binary format shall not be affected by bytea_output
+ let pool = PgPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!("postgres://{addr}/public"))
+ .await
+ .unwrap();
+
+ let row = sqlx::query("select b from test")
+ .fetch_one(&pool)
+ .await
+ .unwrap();
+ let val: Vec<u8> = row.get("b");
+ assert_eq!(val, [97, 98, 99, 107, 108, 109, 42, 169, 84]);
+
+ let _ = fe_pg_server.shutdown().await;
+ guard.remove_all().await;
+}
pub async fn test_postgres_timezone(store_type: StorageType) {
let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_inference").await;
|
feat
|
Support printing postgresql's `bytea` data type in its "hex" and "escape" format (#3567)
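The two formats implemented by this commit follow PostgreSQL's bytea text-output conventions: hex emits a \x prefix plus two lowercase hex digits per byte, while escape passes printable ASCII through, doubles backslashes, and octal-escapes every other byte. The following is a self-contained sketch of those rules as plain functions (the commit itself implements them behind pgwire's ToSqlText/ToSql traits); the expected strings match the tests shown in the diff.

fn bytea_hex(bytes: &[u8]) -> String {
    // "hex" output: a literal \x prefix followed by two lowercase hex digits per byte.
    let mut out = String::with_capacity(2 + bytes.len() * 2);
    out.push_str("\\x");
    for b in bytes {
        out.push_str(&format!("{b:02x}"));
    }
    out
}

fn bytea_escape(bytes: &[u8]) -> String {
    // "escape" output: printable ASCII passes through, a backslash is doubled,
    // and every other byte becomes a three-digit octal escape such as \251.
    let mut out = String::new();
    for b in bytes {
        match b {
            b'\\' => out.push_str("\\\\"),
            0x20..=0x7e => out.push(*b as char),
            _ => out.push_str(&format!("\\{b:03o}")),
        }
    }
    out
}

fn main() {
    // Same payload as the unit and integration tests in the diff above.
    let data = [97u8, 98, 99, 107, 108, 109, 42, 169, 84];
    assert_eq!(bytea_hex(&data), "\\x6162636b6c6d2aa954");
    assert_eq!(bytea_escape(&data), "abcklm*\\251T");
}

Only the text protocol is affected by the setting; the binary wire format is untouched, which is why the sqlx-based check in the integration test still reads the raw bytes back unchanged.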
|
494ce6572991d9efb2b4644ee8e0967e32fe339f
|
2024-05-14 07:27:30
|
maco
|
feat: limiting the size of query results to Dashboard (#3901)
| false
|
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 4ba4e56b088c..a06b8c6f4b20 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -190,6 +190,10 @@ impl From<SchemaRef> for OutputSchema {
pub struct HttpRecordsOutput {
schema: OutputSchema,
rows: Vec<Vec<Value>>,
+ // total_rows is equal to rows.len() in most cases,
+ // the Dashboard query result may be truncated, so we need to return the total_rows.
+ #[serde(default)]
+ total_rows: usize,
// plan level execution metrics
#[serde(skip_serializing_if = "HashMap::is_empty")]
@@ -224,6 +228,7 @@ impl HttpRecordsOutput {
Ok(HttpRecordsOutput {
schema: OutputSchema::from(schema),
rows: vec![],
+ total_rows: 0,
metrics: Default::default(),
})
} else {
@@ -244,6 +249,7 @@ impl HttpRecordsOutput {
Ok(HttpRecordsOutput {
schema: OutputSchema::from(schema),
+ total_rows: rows.len(),
rows,
metrics: Default::default(),
})
@@ -357,6 +363,34 @@ impl HttpResponse {
HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
}
}
+
+ pub fn with_limit(self, limit: usize) -> Self {
+ match self {
+ HttpResponse::Csv(resp) => resp.with_limit(limit).into(),
+ HttpResponse::Table(resp) => resp.with_limit(limit).into(),
+ HttpResponse::GreptimedbV1(resp) => resp.with_limit(limit).into(),
+ _ => self,
+ }
+ }
+}
+
+pub fn process_with_limit(
+ mut outputs: Vec<GreptimeQueryOutput>,
+ limit: usize,
+) -> Vec<GreptimeQueryOutput> {
+ outputs
+ .drain(..)
+ .map(|data| match data {
+ GreptimeQueryOutput::Records(mut records) => {
+ if records.rows.len() > limit {
+ records.rows.truncate(limit);
+ records.total_rows = limit;
+ }
+ GreptimeQueryOutput::Records(records)
+ }
+ _ => data,
+ })
+ .collect()
}
impl IntoResponse for HttpResponse {
diff --git a/src/servers/src/http/csv_result.rs b/src/servers/src/http/csv_result.rs
index ad89ac21b7e4..d6b512653bde 100644
--- a/src/servers/src/http/csv_result.rs
+++ b/src/servers/src/http/csv_result.rs
@@ -23,6 +23,7 @@ use mime_guess::mime;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
+use super::process_with_limit;
use crate::http::error_result::ErrorResponse;
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat};
@@ -65,6 +66,11 @@ impl CsvResponse {
pub fn execution_time_ms(&self) -> u64 {
self.execution_time_ms
}
+
+ pub fn with_limit(mut self, limit: usize) -> Self {
+ self.output = process_with_limit(self.output, limit);
+ self
+ }
}
impl IntoResponse for CsvResponse {
diff --git a/src/servers/src/http/greptime_result_v1.rs b/src/servers/src/http/greptime_result_v1.rs
index ee87a5dca639..9cb2924ba689 100644
--- a/src/servers/src/http/greptime_result_v1.rs
+++ b/src/servers/src/http/greptime_result_v1.rs
@@ -23,6 +23,7 @@ use serde::{Deserialize, Serialize};
use serde_json::Value;
use super::header::GREPTIME_DB_HEADER_METRICS;
+use super::process_with_limit;
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat};
@@ -62,6 +63,11 @@ impl GreptimedbV1Response {
pub fn execution_time_ms(&self) -> u64 {
self.execution_time_ms
}
+
+ pub fn with_limit(mut self, limit: usize) -> Self {
+ self.output = process_with_limit(self.output, limit);
+ self
+ }
}
impl IntoResponse for GreptimedbV1Response {
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 8a16065df2c4..fa8fe98e4cf1 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -62,6 +62,7 @@ pub struct SqlQuery {
// specified time precision. Maybe greptimedb format can support this
// param too.
pub epoch: Option<String>,
+ pub limit: Option<usize>,
}
/// Handler to execute sql
@@ -98,7 +99,7 @@ pub async fn sql(
if let Some((status, msg)) = validate_schema(sql_handler.clone(), query_ctx.clone()).await {
Err((status, msg))
} else {
- Ok(sql_handler.do_query(sql, query_ctx).await)
+ Ok(sql_handler.do_query(sql, query_ctx.clone()).await)
}
} else {
Err((
@@ -117,7 +118,7 @@ pub async fn sql(
Ok(outputs) => outputs,
};
- let resp = match format {
+ let mut resp = match format {
ResponseFormat::Arrow => ArrowResponse::from_output(outputs).await,
ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
ResponseFormat::Table => TableResponse::from_output(outputs).await,
@@ -125,6 +126,9 @@ pub async fn sql(
ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, epoch).await,
};
+ if let Some(limit) = query_params.limit {
+ resp = resp.with_limit(limit);
+ }
resp.with_execution_time(start.elapsed().as_millis() as u64)
}
diff --git a/src/servers/src/http/table_result.rs b/src/servers/src/http/table_result.rs
index a7fac46e89a7..dacef51beace 100644
--- a/src/servers/src/http/table_result.rs
+++ b/src/servers/src/http/table_result.rs
@@ -24,6 +24,7 @@ use mime_guess::mime;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
+use super::process_with_limit;
use crate::http::error_result::ErrorResponse;
use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat};
@@ -66,6 +67,11 @@ impl TableResponse {
pub fn execution_time_ms(&self) -> u64 {
self.execution_time_ms
}
+
+ pub fn with_limit(mut self, limit: usize) -> Self {
+ self.output = process_with_limit(self.output, limit);
+ self
+ }
}
impl Display for TableResponse {
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 0f0c8966ca5d..cb4a5e8ada9f 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -23,6 +23,7 @@ use headers::HeaderValue;
use http_body::combinators::UnsyncBoxBody;
use hyper::Response;
use mime_guess::mime;
+use servers::http::GreptimeQueryOutput::Records;
use servers::http::{
handler as http_handler, script as script_handler, ApiState, GreptimeOptionsConfigState,
GreptimeQueryOutput, HttpResponse,
@@ -48,10 +49,8 @@ async fn test_sql_not_provided() {
for format in ["greptimedb_v1", "influxdb_v1", "csv", "table"] {
let query = http_handler::SqlQuery {
- db: None,
- sql: None,
format: Some(format.to_string()),
- epoch: None,
+ ..Default::default()
};
let HttpResponse::Error(resp) = http_handler::sql(
@@ -82,8 +81,9 @@ async fn test_sql_output_rows() {
script_handler: None,
};
+ let query_sql = "select sum(uint32s) from numbers limit 20";
for format in ["greptimedb_v1", "influxdb_v1", "csv", "table"] {
- let query = create_query(format);
+ let query = create_query(format, query_sql, None);
let json = http_handler::sql(
State(api_state.clone()),
query,
@@ -112,7 +112,8 @@ async fn test_sql_output_rows() {
[
4950
]
- ]
+ ],
+ "total_rows": 1
}"#
);
}
@@ -176,6 +177,49 @@ async fn test_sql_output_rows() {
}
}
+#[tokio::test]
+async fn test_dashboard_sql_limit() {
+ let sql_handler = create_testing_sql_query_handler(MemTable::specified_numbers_table(2000));
+ let ctx = QueryContext::arc();
+ ctx.set_current_user(Some(auth::userinfo_by_name(None)));
+ let api_state = ApiState {
+ sql_handler,
+ script_handler: None,
+ };
+ for format in ["greptimedb_v1", "csv", "table"] {
+ let query = create_query(format, "select * from numbers", Some(1000));
+ let sql_response = http_handler::sql(
+ State(api_state.clone()),
+ query,
+ axum::Extension(ctx.clone()),
+ Form(http_handler::SqlQuery::default()),
+ )
+ .await;
+
+ match sql_response {
+ HttpResponse::GreptimedbV1(resp) => match resp.output().first().unwrap() {
+ Records(records) => {
+ assert_eq!(records.num_rows(), 1000);
+ }
+ _ => unreachable!(),
+ },
+ HttpResponse::Csv(resp) => match resp.output().first().unwrap() {
+ Records(records) => {
+ assert_eq!(records.num_rows(), 1000);
+ }
+ _ => unreachable!(),
+ },
+ HttpResponse::Table(resp) => match resp.output().first().unwrap() {
+ Records(records) => {
+ assert_eq!(records.num_rows(), 1000);
+ }
+ _ => unreachable!(),
+ },
+ _ => unreachable!(),
+ }
+ }
+}
+
#[tokio::test]
async fn test_sql_form() {
common_telemetry::init_default_ut_logging();
@@ -219,7 +263,8 @@ async fn test_sql_form() {
[
4950
]
- ]
+ ],
+ "total_rows": 1
}"#
);
}
@@ -393,7 +438,8 @@ def test(n) -> vector[i64]:
[
4
]
- ]
+ ],
+ "total_rows": 5
}"#
);
}
@@ -460,7 +506,8 @@ def test(n, **params) -> vector[i64]:
[
46
]
- ]
+ ],
+ "total_rows": 5
}"#
);
}
@@ -484,21 +531,20 @@ fn create_invalid_script_query() -> Query<script_handler::ScriptQuery> {
})
}
-fn create_query(format: &str) -> Query<http_handler::SqlQuery> {
+fn create_query(format: &str, sql: &str, limit: Option<usize>) -> Query<http_handler::SqlQuery> {
Query(http_handler::SqlQuery {
- sql: Some("select sum(uint32s) from numbers limit 20".to_string()),
- db: None,
+ sql: Some(sql.to_string()),
format: Some(format.to_string()),
- epoch: None,
+ limit,
+ ..Default::default()
})
}
fn create_form(format: &str) -> Form<http_handler::SqlQuery> {
Form(http_handler::SqlQuery {
sql: Some("select sum(uint32s) from numbers limit 20".to_string()),
- db: None,
format: Some(format.to_string()),
- epoch: None,
+ ..Default::default()
})
}
diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs
index 22562fa1a719..737ef644f637 100644
--- a/src/table/src/test_util/memtable.rs
+++ b/src/table/src/test_util/memtable.rs
@@ -101,6 +101,10 @@ impl MemTable {
/// Creates a 1 column 100 rows table, with table name "numbers", column name "uint32s" and
/// column type "uint32". Column data increased from 0 to 100.
pub fn default_numbers_table() -> TableRef {
+ Self::specified_numbers_table(100)
+ }
+
+ pub fn specified_numbers_table(rows: u32) -> TableRef {
let column_schemas = vec![ColumnSchema::new(
"uint32s",
ConcreteDataType::uint32_datatype(),
@@ -108,7 +112,7 @@ impl MemTable {
)];
let schema = Arc::new(Schema::new(column_schemas));
let columns: Vec<VectorRef> = vec![Arc::new(UInt32Vector::from_slice(
- (0..100).collect::<Vec<_>>(),
+ (0..rows).collect::<Vec<_>>(),
))];
let recordbatch = RecordBatch::new(schema, columns).unwrap();
MemTable::table("numbers", recordbatch)
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 74ce4f6add1d..14c3e8cac265 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -147,7 +147,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records" :{"schema":{"column_schemas":[{"name":"number","data_type":"UInt32"}]},"rows":[[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]]}
+ "records" :{"schema":{"column_schemas":[{"name":"number","data_type":"UInt32"}]},"rows":[[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]],"total_rows":10}
})).unwrap()
);
@@ -189,7 +189,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"host","data_type":"String"},{"name":"cpu","data_type":"Float64"},{"name":"memory","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[["host",66.6,1024.0,0]]}
+ "records":{"schema":{"column_schemas":[{"name":"host","data_type":"String"},{"name":"cpu","data_type":"Float64"},{"name":"memory","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[["host",66.6,1024.0,0]],"total_rows":1}
})).unwrap()
);
@@ -207,7 +207,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
+ "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]],"total_rows":1}
})).unwrap()
);
@@ -224,7 +224,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
output[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"c","data_type":"Float64"},{"name":"time","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
+ "records":{"schema":{"column_schemas":[{"name":"c","data_type":"Float64"},{"name":"time","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]],"total_rows":1}
})).unwrap()
);
@@ -241,13 +241,13 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
outputs[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
+ "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]],"total_rows":1}
})).unwrap()
);
assert_eq!(
outputs[1],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"rows":[], "schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]}}
+ "records":{"rows":[], "schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]}, "total_rows":0}
}))
.unwrap()
);
@@ -276,7 +276,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
outputs[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
+ "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]],"total_rows":1}
})).unwrap()
);
@@ -302,7 +302,7 @@ pub async fn test_sql_api(store_type: StorageType) {
assert_eq!(
outputs[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
+ "records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]],"total_rows":1}
})).unwrap()
);
@@ -673,7 +673,7 @@ def test(n) -> vector[f64]:
assert_eq!(
output[0],
serde_json::from_value::<GreptimeQueryOutput>(json!({
- "records":{"schema":{"column_schemas":[{"name":"n","data_type":"Float64"}]},"rows":[[1.0],[2.0],[3.0],[4.0],[5.0],[6.0],[7.0],[8.0],[9.0],[10.0]]}
+ "records":{"schema":{"column_schemas":[{"name":"n","data_type":"Float64"}]},"rows":[[1.0],[2.0],[3.0],[4.0],[5.0],[6.0],[7.0],[8.0],[9.0],[10.0]],"total_rows": 10}
})).unwrap()
);
|
feat
|
limiting the size of query results to Dashboard (#3901)
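This commit threads an optional limit query parameter through the HTTP SQL handler; when a result set is larger than the limit, the rows are truncated and total_rows reports the count actually returned (otherwise total_rows simply equals rows.len()). A compact sketch of that truncation rule follows, using a simplified Records type in place of HttpRecordsOutput (the real struct also carries a schema and execution metrics).

// Simplified stand-in for HttpRecordsOutput: just rows plus the total_rows
// counter that the JSON response now carries.
struct Records {
    rows: Vec<Vec<u64>>,
    total_rows: usize,
}

// Same rule as `process_with_limit`: keep at most `limit` rows and make
// `total_rows` reflect what is actually returned.
fn apply_limit(mut records: Records, limit: usize) -> Records {
    if records.rows.len() > limit {
        records.rows.truncate(limit);
        records.total_rows = limit;
    }
    records
}

fn main() {
    let full = Records {
        rows: (0..2000u64).map(|n| vec![n]).collect(),
        total_rows: 2000,
    };
    let limited = apply_limit(full, 1000);
    assert_eq!(limited.rows.len(), 1000);
    assert_eq!(limited.total_rows, 1000);
}

The dashboard supplies the limit in the query string, as in the handler test above which selects from a 2000-row table with limit=1000 and expects exactly 1000 rows back.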
|
6e9a9dc333324cef054171feb9a5d0998c040527
|
2024-05-29 19:14:01
|
Weny Xu
|
refactor(log_store): remove associated type `Namespace` and `Entry` in `LogStore` (#4038)
| false
|
diff --git a/benchmarks/src/wal_bench.rs b/benchmarks/src/wal_bench.rs
index 10e88f99f37c..681dacfbb60e 100644
--- a/benchmarks/src/wal_bench.rs
+++ b/benchmarks/src/wal_bench.rs
@@ -28,6 +28,7 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
+use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
@@ -210,7 +211,7 @@ impl From<Args> for Config {
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
- wal_options: WalOptions,
+ provider: Provider,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
@@ -227,10 +228,14 @@ impl Region {
num_rows: u32,
rng_seed: u64,
) -> Self {
+ let provider = match wal_options {
+ WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
+ WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
+ };
Self {
id,
schema,
- wal_options,
+ provider,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
@@ -258,14 +263,14 @@ impl Region {
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
- &self.wal_options,
+ &self.provider,
)
.unwrap();
}
/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
- let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
+ let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index 62fa9a5bf60b..4088c5236ca8 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -94,7 +94,7 @@ pub fn init_default_ut_logging() {
env::var("UNITTEST_LOG_DIR").unwrap_or_else(|_| "/tmp/__unittest_logs".to_string());
let level = env::var("UNITTEST_LOG_LEVEL").unwrap_or_else(|_|
- "debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info".to_string()
+ "debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info,rskafka=info".to_string()
);
let opts = LoggingOptions {
dir: dir.clone(),
diff --git a/src/log-store/src/error.rs b/src/log-store/src/error.rs
index 45449c9d65e8..280ce6410609 100644
--- a/src/log-store/src/error.rs
+++ b/src/log-store/src/error.rs
@@ -21,12 +21,18 @@ use serde_json::error::Error as JsonError;
use snafu::{Location, Snafu};
use store_api::storage::RegionId;
-use crate::kafka::NamespaceImpl as KafkaNamespace;
-
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
+ #[snafu(display("Invalid provider type, expected: {}, actual: {}", expected, actual))]
+ InvalidProvider {
+ #[snafu(implicit)]
+ location: Location,
+ expected: String,
+ actual: String,
+ },
+
#[snafu(display("Failed to start log store gc task"))]
StartGcTask {
#[snafu(implicit)]
@@ -170,34 +176,28 @@ pub enum Error {
location: Location,
},
- #[snafu(display(
- "Failed to produce records to Kafka, topic: {}, size: {}, limit: {}",
- topic,
- size,
- limit,
- ))]
+ #[snafu(display("Failed to produce records to Kafka, topic: {}, size: {}", topic, size))]
ProduceRecord {
topic: String,
size: usize,
- limit: usize,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: rskafka::client::producer::Error,
},
- #[snafu(display("Failed to read a record from Kafka, ns: {}", ns))]
+ #[snafu(display("Failed to read a record from Kafka, topic: {}", topic))]
ConsumeRecord {
- ns: KafkaNamespace,
+ topic: String,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: rskafka::client::error::Error,
},
- #[snafu(display("Failed to get the latest offset, ns: {}", ns))]
+ #[snafu(display("Failed to get the latest offset, topic: {}", topic))]
GetOffset {
- ns: KafkaNamespace,
+ topic: String,
#[snafu(implicit)]
location: Location,
#[snafu(source)]
diff --git a/src/log-store/src/kafka.rs b/src/log-store/src/kafka.rs
index dc068f3b4b52..415cc53ddbce 100644
--- a/src/log-store/src/kafka.rs
+++ b/src/log-store/src/kafka.rs
@@ -12,17 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::mem::size_of;
pub(crate) mod client_manager;
pub mod log_store;
pub(crate) mod util;
-use std::fmt::Display;
-
use serde::{Deserialize, Serialize};
-use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
-use store_api::logstore::namespace::Namespace;
-use store_api::storage::RegionId;
+use store_api::logstore::entry::Id as EntryId;
/// Kafka Namespace implementation.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
@@ -31,18 +26,6 @@ pub struct NamespaceImpl {
pub topic: String,
}
-impl Namespace for NamespaceImpl {
- fn id(&self) -> u64 {
- self.region_id
- }
-}
-
-impl Display for NamespaceImpl {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "[topic: {}, region: {}]", self.topic, self.region_id)
- }
-}
-
/// Kafka Entry implementation.
#[derive(Debug, PartialEq, Clone)]
pub struct EntryImpl {
@@ -53,65 +36,3 @@ pub struct EntryImpl {
/// The namespace used to identify and isolate log entries from different regions.
pub ns: NamespaceImpl,
}
-
-impl Entry for EntryImpl {
- fn into_raw_entry(self) -> RawEntry {
- RawEntry {
- region_id: self.region_id(),
- entry_id: self.id(),
- data: self.data,
- }
- }
-
- fn data(&self) -> &[u8] {
- &self.data
- }
-
- fn id(&self) -> EntryId {
- self.id
- }
-
- fn region_id(&self) -> RegionId {
- RegionId::from_u64(self.ns.region_id)
- }
-
- fn estimated_size(&self) -> usize {
- size_of::<Self>() + self.data.capacity() * size_of::<u8>() + self.ns.topic.capacity()
- }
-}
-
-impl Display for EntryImpl {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(
- f,
- "Entry [ns: {}, id: {}, data_len: {}]",
- self.ns,
- self.id,
- self.data.len()
- )
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::mem::size_of;
-
- use store_api::logstore::entry::Entry;
-
- use crate::kafka::{EntryImpl, NamespaceImpl};
-
- #[test]
- fn test_estimated_size() {
- let entry = EntryImpl {
- data: Vec::with_capacity(100),
- id: 0,
- ns: NamespaceImpl {
- region_id: 0,
- topic: String::with_capacity(10),
- },
- };
- let expected = size_of::<EntryImpl>() + 100 * size_of::<u8>() + 10;
- let got = entry.estimated_size();
- assert_eq!(expected, got);
- }
-}
diff --git a/src/log-store/src/kafka/client_manager.rs b/src/log-store/src/kafka/client_manager.rs
index 1708efed1d09..81feaddb6627 100644
--- a/src/log-store/src/kafka/client_manager.rs
+++ b/src/log-store/src/kafka/client_manager.rs
@@ -27,6 +27,7 @@ use tokio::sync::RwLock;
use crate::error::{
BuildClientSnafu, BuildPartitionClientSnafu, ResolveKafkaEndpointSnafu, Result,
};
+use crate::kafka::util::record::MIN_BATCH_SIZE;
// Each topic only has one partition for now.
// The `DEFAULT_PARTITION` refers to the index of the partition.
@@ -48,7 +49,8 @@ pub(crate) struct Client {
impl Client {
/// Creates a Client from the raw client.
pub(crate) fn new(raw_client: Arc<PartitionClient>, config: &DatanodeKafkaConfig) -> Self {
- let record_aggregator = RecordAggregator::new(config.max_batch_size.as_bytes() as usize);
+ let record_aggregator =
+ RecordAggregator::new((config.max_batch_size.as_bytes() as usize).max(MIN_BATCH_SIZE));
let batch_producer = BatchProducerBuilder::new(raw_client.clone())
.with_compression(config.compression)
.with_linger(config.linger)
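
The log_store.rs changes that follow split any WAL payload larger than the per-record budget into fixed-size chunks tagged First / Middle(i) / Last, so one logical entry can span several Kafka records and be reassembled on read. Below is a rough, self-contained sketch of that chunk-and-header bookkeeping; PartHeader and split_into_parts are illustrative names standing in for MultiplePartHeader and the chunking half of build_entry, not the crate's real API.

#[derive(Debug, PartialEq)]
enum PartHeader {
    First,
    Middle(usize),
    Last,
}

// Splits an oversized payload into `max_part_size` chunks and pairs them with
// First / Middle(i) / Last headers, mirroring the shape produced by `build_entry`.
fn split_into_parts(data: &[u8], max_part_size: usize) -> (Vec<PartHeader>, Vec<Vec<u8>>) {
    assert!(data.len() > max_part_size, "payloads that fit stay single-part");
    let parts: Vec<Vec<u8>> = data.chunks(max_part_size).map(|c| c.to_vec()).collect();
    let num_parts = parts.len(); // always >= 2 on this path

    let mut headers = Vec::with_capacity(num_parts);
    headers.push(PartHeader::First);
    headers.extend((1..num_parts - 1).map(PartHeader::Middle));
    headers.push(PartHeader::Last);

    (headers, parts)
}

fn main() {
    // 100 bytes with a 21-byte budget -> 5 parts, matching the unit test in the diff.
    let (headers, parts) = split_into_parts(&[1u8; 100], 21);
    assert_eq!(parts.iter().map(Vec::len).collect::<Vec<_>>(), vec![21, 21, 21, 21, 16]);
    assert_eq!(
        headers,
        vec![
            PartHeader::First,
            PartHeader::Middle(1),
            PartHeader::Middle(2),
            PartHeader::Middle(3),
            PartHeader::Last
        ]
    );
}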
diff --git a/src/log-store/src/kafka/log_store.rs b/src/log-store/src/kafka/log_store.rs
index 1a0f96b6587b..ceca6fc30bd7 100644
--- a/src/log-store/src/kafka/log_store.rs
+++ b/src/log-store/src/kafka/log_store.rs
@@ -17,21 +17,23 @@ use std::sync::Arc;
use common_telemetry::{debug, warn};
use common_wal::config::kafka::DatanodeKafkaConfig;
-use common_wal::options::WalOptions;
use futures_util::StreamExt;
use rskafka::client::consumer::{StartOffset, StreamConsumerBuilder};
use rskafka::client::partition::OffsetAt;
-use snafu::ResultExt;
-use store_api::logstore::entry::{Entry as EntryTrait, Id as EntryId};
-use store_api::logstore::entry_stream::SendableEntryStream;
-use store_api::logstore::namespace::Id as NamespaceId;
-use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
-
-use crate::error::{ConsumeRecordSnafu, Error, GetOffsetSnafu, IllegalSequenceSnafu, Result};
+use snafu::{OptionExt, ResultExt};
+use store_api::logstore::entry::{
+ Entry, Id as EntryId, MultiplePartEntry, MultiplePartHeader, NaiveEntry,
+};
+use store_api::logstore::provider::{KafkaProvider, Provider};
+use store_api::logstore::{AppendBatchResponse, LogStore, SendableEntryStream};
+use store_api::storage::RegionId;
+
+use crate::error::{self, ConsumeRecordSnafu, Error, GetOffsetSnafu, InvalidProviderSnafu, Result};
use crate::kafka::client_manager::{ClientManager, ClientManagerRef};
use crate::kafka::util::offset::Offset;
-use crate::kafka::util::record::{maybe_emit_entry, Record, RecordProducer};
-use crate::kafka::{EntryImpl, NamespaceImpl};
+use crate::kafka::util::record::{
+ maybe_emit_entry, remaining_entries, Record, RecordProducer, ESTIMATED_META_SIZE,
+};
use crate::metrics;
/// A log store backed by Kafka.
@@ -52,41 +54,81 @@ impl KafkaLogStore {
}
}
+fn build_entry(
+ data: &mut Vec<u8>,
+ entry_id: EntryId,
+ region_id: RegionId,
+ provider: &Provider,
+ max_data_size: usize,
+) -> Entry {
+ if data.len() <= max_data_size {
+ Entry::Naive(NaiveEntry {
+ provider: provider.clone(),
+ region_id,
+ entry_id,
+ data: std::mem::take(data),
+ })
+ } else {
+ let parts = std::mem::take(data)
+ .chunks(max_data_size)
+ .map(|s| s.into())
+ .collect::<Vec<_>>();
+ let num_parts = parts.len();
+
+ let mut headers = Vec::with_capacity(num_parts);
+ headers.push(MultiplePartHeader::First);
+ headers.extend((1..num_parts - 1).map(MultiplePartHeader::Middle));
+ headers.push(MultiplePartHeader::Last);
+
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: provider.clone(),
+ region_id,
+ entry_id,
+ headers,
+ parts,
+ })
+ }
+}
+
#[async_trait::async_trait]
impl LogStore for KafkaLogStore {
type Error = Error;
- type Entry = EntryImpl;
- type Namespace = NamespaceImpl;
-
- /// Creates an entry of the associated Entry type.
- fn entry(&self, data: &mut Vec<u8>, entry_id: EntryId, ns: Self::Namespace) -> Self::Entry {
- EntryImpl {
- data: std::mem::take(data),
- id: entry_id,
- ns,
- }
- }
- /// Appends an entry to the log store and returns a response containing the entry id of the appended entry.
- async fn append(&self, entry: Self::Entry) -> Result<AppendResponse> {
- let entry_id = RecordProducer::new(entry.ns.clone())
- .with_entries(vec![entry])
- .produce(&self.client_manager)
- .await
- .map(TryInto::try_into)??;
- Ok(AppendResponse {
- last_entry_id: entry_id,
- })
+ /// Creates an [Entry].
+ fn entry(
+ &self,
+ data: &mut Vec<u8>,
+ entry_id: EntryId,
+ region_id: RegionId,
+ provider: &Provider,
+ ) -> Result<Entry> {
+ provider
+ .as_kafka_provider()
+ .with_context(|| InvalidProviderSnafu {
+ expected: KafkaProvider::type_name(),
+ actual: provider.type_name(),
+ })?;
+
+ let max_data_size =
+ self.client_manager.config.max_batch_size.as_bytes() as usize - ESTIMATED_META_SIZE;
+ Ok(build_entry(
+ data,
+ entry_id,
+ region_id,
+ provider,
+ max_data_size,
+ ))
}
+ // TODO(weny): refactor the writing.
/// Appends a batch of entries and returns a response containing a map where the key is a region id
/// while the value is the id of the last successfully written entry of the region.
- async fn append_batch(&self, entries: Vec<Self::Entry>) -> Result<AppendBatchResponse> {
+ async fn append_batch(&self, entries: Vec<Entry>) -> Result<AppendBatchResponse> {
metrics::METRIC_KAFKA_APPEND_BATCH_CALLS_TOTAL.inc();
metrics::METRIC_KAFKA_APPEND_BATCH_BYTES_TOTAL.inc_by(
entries
.iter()
- .map(EntryTrait::estimated_size)
+ .map(|entry| entry.estimated_size())
.sum::<usize>() as u64,
);
let _timer = metrics::METRIC_KAFKA_APPEND_BATCH_ELAPSED.start_timer();
@@ -98,9 +140,17 @@ impl LogStore for KafkaLogStore {
// Groups entries by region id and pushes them to an associated record producer.
let mut producers = HashMap::with_capacity(entries.len());
for entry in entries {
+ let provider = entry
+ .provider()
+ .as_kafka_provider()
+ .context(error::InvalidProviderSnafu {
+ expected: KafkaProvider::type_name(),
+ actual: entry.provider().type_name(),
+ })?
+ .clone();
producers
- .entry(entry.ns.region_id)
- .or_insert_with(|| RecordProducer::new(entry.ns.clone()))
+ .entry(entry.region_id())
+ .or_insert_with(|| RecordProducer::new(provider))
.push(entry);
}
@@ -122,20 +172,27 @@ impl LogStore for KafkaLogStore {
Ok(AppendBatchResponse { last_entry_ids })
}
- /// Creates a new `EntryStream` to asynchronously generates `Entry` with entry ids
- /// starting from `entry_id`. The generated entries will be filtered by the namespace.
+ /// Creates a new `EntryStream` to asynchronously generates `Entry` with entry ids.
+ /// Returns entries belonging to `provider`, starting from `entry_id`.
async fn read(
&self,
- ns: &Self::Namespace,
+ provider: &Provider,
entry_id: EntryId,
- ) -> Result<SendableEntryStream<Self::Entry, Self::Error>> {
+ ) -> Result<SendableEntryStream<'static, Entry, Self::Error>> {
+ let provider = provider
+ .as_kafka_provider()
+ .with_context(|| InvalidProviderSnafu {
+ expected: KafkaProvider::type_name(),
+ actual: provider.type_name(),
+ })?;
+
metrics::METRIC_KAFKA_READ_CALLS_TOTAL.inc();
let _timer = metrics::METRIC_KAFKA_READ_ELAPSED.start_timer();
// Gets the client associated with the topic.
let client = self
.client_manager
- .get_or_insert(&ns.topic)
+ .get_or_insert(&provider.topic)
.await?
.raw_client
.clone();
@@ -147,14 +204,16 @@ impl LogStore for KafkaLogStore {
let end_offset = client
.get_offset(OffsetAt::Latest)
.await
- .context(GetOffsetSnafu { ns: ns.clone() })?
+ .context(GetOffsetSnafu {
+ topic: &provider.topic,
+ })?
- 1;
// Reads entries with offsets in the range [start_offset, end_offset].
let start_offset = Offset::try_from(entry_id)?.0;
debug!(
"Start reading entries in range [{}, {}] for ns {}",
- start_offset, end_offset, ns
+ start_offset, end_offset, provider
);
// Abort if there're no new entries.
@@ -162,7 +221,7 @@ impl LogStore for KafkaLogStore {
if start_offset > end_offset {
warn!(
"No new entries for ns {} in range [{}, {}]",
- ns, start_offset, end_offset
+ provider, start_offset, end_offset
);
return Ok(futures_util::stream::empty().boxed());
}
@@ -174,20 +233,20 @@ impl LogStore for KafkaLogStore {
debug!(
"Built a stream consumer for ns {} to consume entries in range [{}, {}]",
- ns, start_offset, end_offset
+ provider, start_offset, end_offset
);
- // Key: entry id, Value: the records associated with the entry.
- let mut entry_records: HashMap<_, Vec<_>> = HashMap::new();
- let ns_clone = ns.clone();
+ // A buffer is used to collect records to construct a complete entry.
+ let mut entry_records: HashMap<RegionId, Vec<Record>> = HashMap::new();
+ let provider = provider.clone();
let stream = async_stream::stream!({
while let Some(consume_result) = stream_consumer.next().await {
// Each next on the stream consumer produces a `RecordAndOffset` and a high watermark offset.
// The `RecordAndOffset` contains the record data and its start offset.
// The high watermark offset is the offset of the last record plus one.
let (record_and_offset, high_watermark) =
- consume_result.with_context(|_| ConsumeRecordSnafu {
- ns: ns_clone.clone(),
+ consume_result.context(ConsumeRecordSnafu {
+ topic: &provider.topic,
})?;
let (kafka_record, offset) = (record_and_offset.record, record_and_offset.offset);
@@ -195,37 +254,35 @@ impl LogStore for KafkaLogStore {
.inc_by(kafka_record.approximate_size() as u64);
debug!(
- "Read a record at offset {} for ns {}, high watermark: {}",
- offset, ns_clone, high_watermark
+ "Read a record at offset {} for topic {}, high watermark: {}",
+ offset, provider.topic, high_watermark
);
// Ignores no-op records.
if kafka_record.value.is_none() {
- if check_termination(offset, end_offset, &entry_records)? {
+ if check_termination(offset, end_offset) {
+ if let Some(entries) = remaining_entries(&provider, &mut entry_records) {
+ yield Ok(entries);
+ }
break;
}
continue;
}
- // Filters records by namespace.
let record = Record::try_from(kafka_record)?;
- if record.meta.ns != ns_clone {
- if check_termination(offset, end_offset, &entry_records)? {
- break;
- }
- continue;
- }
-
// Tries to construct an entry from records consumed so far.
- if let Some(mut entry) = maybe_emit_entry(record, &mut entry_records)? {
+ if let Some(mut entry) = maybe_emit_entry(&provider, record, &mut entry_records)? {
// We don't rely on the EntryId generated by mito2.
// Instead, we use the offset return from Kafka as EntryId.
// Therefore, we MUST overwrite the EntryId with RecordOffset.
- entry.id = offset as u64;
+ entry.set_entry_id(offset as u64);
yield Ok(vec![entry]);
}
- if check_termination(offset, end_offset, &entry_records)? {
+ if check_termination(offset, end_offset) {
+ if let Some(entries) = remaining_entries(&provider, &mut entry_records) {
+ yield Ok(entries);
+ }
break;
}
}
@@ -233,39 +290,25 @@ impl LogStore for KafkaLogStore {
Ok(Box::pin(stream))
}
- /// Creates a namespace of the associated Namespace type.
- fn namespace(&self, ns_id: NamespaceId, wal_options: &WalOptions) -> Self::Namespace {
- // Safety: upon start, the datanode checks the consistency of the wal providers in the wal config of the
- // datanode and that of the metasrv. Therefore, the wal options passed into the kafka log store
- // must be of type WalOptions::Kafka.
- let WalOptions::Kafka(kafka_options) = wal_options else {
- unreachable!()
- };
- NamespaceImpl {
- region_id: ns_id,
- topic: kafka_options.topic.clone(),
- }
- }
-
/// Creates a new `Namespace` from the given ref.
- async fn create_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
+ async fn create_namespace(&self, _provider: &Provider) -> Result<()> {
Ok(())
}
/// Deletes an existing `Namespace` specified by the given ref.
- async fn delete_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
+ async fn delete_namespace(&self, _provider: &Provider) -> Result<()> {
Ok(())
}
/// Lists all existing namespaces.
- async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
+ async fn list_namespaces(&self) -> Result<Vec<Provider>> {
Ok(vec![])
}
/// Marks all entries with ids `<=entry_id` of the given `namespace` as obsolete,
/// so that the log store can safely delete those entries. This method does not guarantee
/// that the obsolete entries are deleted immediately.
- async fn obsolete(&self, _ns: Self::Namespace, _entry_id: EntryId) -> Result<()> {
+ async fn obsolete(&self, _provider: &Provider, _entry_id: EntryId) -> Result<()> {
Ok(())
}
@@ -275,227 +318,249 @@ impl LogStore for KafkaLogStore {
}
}
-fn check_termination(
- offset: i64,
- end_offset: i64,
- entry_records: &HashMap<EntryId, Vec<Record>>,
-) -> Result<bool> {
+fn check_termination(offset: i64, end_offset: i64) -> bool {
// Terminates the stream if the entry with the end offset was read.
if offset >= end_offset {
debug!("Stream consumer terminates at offset {}", offset);
// There must have no records when the stream terminates.
- if !entry_records.is_empty() {
- return IllegalSequenceSnafu {
- error: "Found records leftover",
- }
- .fail();
- }
- Ok(true)
+ true
} else {
- Ok(false)
+ false
}
}
#[cfg(test)]
mod tests {
+
+ use std::assert_matches::assert_matches;
+ use std::collections::HashMap;
+
use common_base::readable_size::ReadableSize;
- use rand::seq::IteratorRandom;
-
- use super::*;
- use crate::test_util::kafka::{
- create_topics, entries_with_random_data, new_namespace, EntryBuilder,
- };
-
- // Stores test context for a region.
- struct RegionContext {
- ns: NamespaceImpl,
- entry_builder: EntryBuilder,
- expected: Vec<EntryImpl>,
- flushed_entry_id: EntryId,
+ use common_telemetry::info;
+ use common_telemetry::tracing::warn;
+ use common_wal::config::kafka::DatanodeKafkaConfig;
+ use futures::TryStreamExt;
+ use rand::prelude::SliceRandom;
+ use rand::Rng;
+ use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry};
+ use store_api::logstore::provider::Provider;
+ use store_api::logstore::LogStore;
+ use store_api::storage::RegionId;
+
+ use super::build_entry;
+ use crate::kafka::log_store::KafkaLogStore;
+
+ #[test]
+ fn test_build_naive_entry() {
+ let provider = Provider::kafka_provider("my_topic".to_string());
+ let region_id = RegionId::new(1, 1);
+ let entry = build_entry(&mut vec![1; 100], 1, region_id, &provider, 120);
+
+ assert_eq!(
+ entry.into_naive_entry().unwrap(),
+ NaiveEntry {
+ provider,
+ region_id,
+ entry_id: 1,
+ data: vec![1; 100]
+ }
+ )
}
- /// Prepares for a test in that a log store is constructed and a collection of topics is created.
- async fn prepare(
- test_name: &str,
- num_topics: usize,
- broker_endpoints: Vec<String>,
- ) -> (KafkaLogStore, Vec<String>) {
- let topics = create_topics(
- num_topics,
- |i| format!("{test_name}_{}_{}", i, uuid::Uuid::new_v4()),
- &broker_endpoints,
+ #[test]
+ fn test_build_into_multiple_part_entry() {
+ let provider = Provider::kafka_provider("my_topic".to_string());
+ let region_id = RegionId::new(1, 1);
+ let entry = build_entry(&mut vec![1; 100], 1, region_id, &provider, 50);
+
+ assert_eq!(
+ entry.into_multiple_part_entry().unwrap(),
+ MultiplePartEntry {
+ provider: provider.clone(),
+ region_id,
+ entry_id: 1,
+ headers: vec![MultiplePartHeader::First, MultiplePartHeader::Last],
+ parts: vec![vec![1; 50], vec![1; 50]],
+ }
+ );
+
+ let region_id = RegionId::new(1, 1);
+ let entry = build_entry(&mut vec![1; 100], 1, region_id, &provider, 21);
+
+ assert_eq!(
+ entry.into_multiple_part_entry().unwrap(),
+ MultiplePartEntry {
+ provider,
+ region_id,
+ entry_id: 1,
+ headers: vec![
+ MultiplePartHeader::First,
+ MultiplePartHeader::Middle(1),
+ MultiplePartHeader::Middle(2),
+ MultiplePartHeader::Middle(3),
+ MultiplePartHeader::Last
+ ],
+ parts: vec![
+ vec![1; 21],
+ vec![1; 21],
+ vec![1; 21],
+ vec![1; 21],
+ vec![1; 16]
+ ],
+ }
)
- .await;
+ }
+
+ fn generate_entries(
+ logstore: &KafkaLogStore,
+ provider: &Provider,
+ num_entries: usize,
+ region_id: RegionId,
+ data_len: usize,
+ ) -> Vec<Entry> {
+ (0..num_entries)
+ .map(|_| {
+ let mut data: Vec<u8> = (0..data_len).map(|_| rand::random::<u8>()).collect();
+ // Always set `entry_id` to 0, the real entry_id will be set during the read.
+ logstore.entry(&mut data, 0, region_id, provider).unwrap()
+ })
+ .collect()
+ }
+ #[tokio::test]
+ async fn test_append_batch_basic() {
+ common_telemetry::init_default_ut_logging();
+ let Ok(broker_endpoints) = std::env::var("GT_KAFKA_ENDPOINTS") else {
+ warn!("The endpoints is empty, skipping the test 'test_append_batch_basic'");
+ return;
+ };
+ let broker_endpoints = broker_endpoints
+ .split(',')
+ .map(|s| s.trim().to_string())
+ .collect::<Vec<_>>();
let config = DatanodeKafkaConfig {
broker_endpoints,
max_batch_size: ReadableSize::kb(32),
..Default::default()
};
let logstore = KafkaLogStore::try_new(&config).await.unwrap();
+ let topic_name = uuid::Uuid::new_v4().to_string();
+ let provider = Provider::kafka_provider(topic_name);
+ let region_entries = (0..5)
+ .map(|i| {
+ let region_id = RegionId::new(1, i);
+ (
+ region_id,
+ generate_entries(&logstore, &provider, 20, region_id, 1024),
+ )
+ })
+ .collect::<HashMap<RegionId, Vec<_>>>();
- // Appends a no-op record to each topic.
- for topic in topics.iter() {
- let last_entry_id = logstore
- .append(EntryImpl {
- data: vec![],
- id: 0,
- ns: new_namespace(topic, 0),
- })
- .await
- .unwrap()
- .last_entry_id;
- assert_eq!(last_entry_id, 0);
- }
-
- (logstore, topics)
- }
-
- /// Creates a vector containing indexes of all regions if the `all` is true.
- /// Otherwise, creates a subset of the indexes. The cardinality of the subset
- /// is nearly a quarter of that of the universe set.
- fn all_or_subset(all: bool, num_regions: usize) -> Vec<u64> {
- assert!(num_regions > 0);
- let amount = if all {
- num_regions
- } else {
- (num_regions / 4).max(1)
- };
- (0..num_regions as u64).choose_multiple(&mut rand::thread_rng(), amount)
- }
+ let mut all_entries = region_entries
+ .values()
+ .flatten()
+ .cloned()
+ .collect::<Vec<_>>();
+ all_entries.shuffle(&mut rand::thread_rng());
- /// Builds entries for regions specified by `which`. Builds large entries if `large` is true.
- /// Returns the aggregated entries.
- fn build_entries(
- region_contexts: &mut HashMap<u64, RegionContext>,
- which: &[u64],
- large: bool,
- ) -> Vec<EntryImpl> {
- let mut aggregated = Vec::with_capacity(which.len());
- for region_id in which {
- let ctx = region_contexts.get_mut(region_id).unwrap();
- // Builds entries for the region.
- ctx.expected = if !large {
- entries_with_random_data(3, &ctx.entry_builder)
- } else {
- // Builds a large entry of size 256KB which is way greater than the configured `max_batch_size` which is 32KB.
- let large_entry = ctx.entry_builder.with_data([b'1'; 256 * 1024]);
- vec![large_entry]
- };
- // Aggregates entries of all regions.
- aggregated.push(ctx.expected.clone());
+ let response = logstore.append_batch(all_entries.clone()).await.unwrap();
+ // 5 regions
+ assert_eq!(response.last_entry_ids.len(), 5);
+ let got_entries = logstore
+ .read(&provider, 0)
+ .await
+ .unwrap()
+ .try_collect::<Vec<_>>()
+ .await
+ .unwrap()
+ .into_iter()
+ .flatten()
+ .collect::<Vec<_>>();
+ for (region_id, _) in region_entries {
+ let expected_entries = all_entries
+ .iter()
+ .filter(|entry| entry.region_id() == region_id)
+ .cloned()
+ .collect::<Vec<_>>();
+ let mut actual_entries = got_entries
+ .iter()
+ .filter(|entry| entry.region_id() == region_id)
+ .cloned()
+ .collect::<Vec<_>>();
+ actual_entries
+ .iter_mut()
+ .for_each(|entry| entry.set_entry_id(0));
+ assert_eq!(expected_entries, actual_entries);
}
- aggregated.into_iter().flatten().collect()
}
- /// Starts a test with:
- /// * `test_name` - The name of the test.
- /// * `num_topics` - Number of topics to be created in the preparation phase.
- /// * `num_regions` - Number of regions involved in the test.
- /// * `num_appends` - Number of append operations to be performed.
- /// * `all` - All regions will be involved in an append operation if `all` is true. Otherwise,
- /// an append operation will only randomly choose a subset of regions.
- /// * `large` - Builds large entries for each region is `large` is true.
- async fn test_with(
- test_name: &str,
- num_topics: usize,
- num_regions: usize,
- num_appends: usize,
- all: bool,
- large: bool,
- ) {
+ #[tokio::test]
+ async fn test_append_batch_basic_large() {
+ common_telemetry::init_default_ut_logging();
let Ok(broker_endpoints) = std::env::var("GT_KAFKA_ENDPOINTS") else {
- warn!("The endpoints is empty, skipping the test {test_name}");
+ warn!("The endpoints is empty, skipping the test 'test_append_batch_basic_large'");
return;
};
+ let data_size_kb = rand::thread_rng().gen_range(9..31usize);
+ info!("Entry size: {}Ki", data_size_kb);
let broker_endpoints = broker_endpoints
.split(',')
.map(|s| s.trim().to_string())
.collect::<Vec<_>>();
-
- let (logstore, topics) = prepare(test_name, num_topics, broker_endpoints).await;
- let mut region_contexts = (0..num_regions)
+ let config = DatanodeKafkaConfig {
+ broker_endpoints,
+ max_batch_size: ReadableSize::kb(8),
+ ..Default::default()
+ };
+ let logstore = KafkaLogStore::try_new(&config).await.unwrap();
+ let topic_name = uuid::Uuid::new_v4().to_string();
+ let provider = Provider::kafka_provider(topic_name);
+ let region_entries = (0..5)
.map(|i| {
- let topic = &topics[i % topics.len()];
- let ns = new_namespace(topic, i as u64);
- let entry_builder = EntryBuilder::new(ns.clone());
+ let region_id = RegionId::new(1, i);
(
- i as u64,
- RegionContext {
- ns,
- entry_builder,
- expected: Vec::new(),
- flushed_entry_id: 0,
- },
+ region_id,
+ generate_entries(&logstore, &provider, 20, region_id, data_size_kb * 1024),
)
})
- .collect();
-
- for _ in 0..num_appends {
- // Appends entries for a subset of regions.
- let which = all_or_subset(all, num_regions);
- let entries = build_entries(&mut region_contexts, &which, large);
- let last_entry_ids = logstore.append_batch(entries).await.unwrap().last_entry_ids;
-
- // Reads entries for regions and checks for each region that the gotten entries are identical with the expected ones.
- for region_id in which {
- let ctx = region_contexts.get_mut(®ion_id).unwrap();
- let stream = logstore
- .read(&ctx.ns, ctx.flushed_entry_id + 1)
- .await
- .unwrap();
- let mut got = stream
- .collect::<Vec<_>>()
- .await
- .into_iter()
- .flat_map(|x| x.unwrap())
- .collect::<Vec<_>>();
- //FIXME(weny): https://github.com/GreptimeTeam/greptimedb/issues/3152
- ctx.expected.iter_mut().for_each(|entry| entry.id = 0);
- got.iter_mut().for_each(|entry| entry.id = 0);
- assert_eq!(ctx.expected, got);
- }
+ .collect::<HashMap<RegionId, Vec<_>>>();
- // Simulates a flush for regions.
- for (region_id, last_entry_id) in last_entry_ids {
- let ctx = region_contexts.get_mut(®ion_id).unwrap();
- ctx.flushed_entry_id = last_entry_id;
- }
+ let mut all_entries = region_entries
+ .values()
+ .flatten()
+ .cloned()
+ .collect::<Vec<_>>();
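+ // Each entry payload (9..31 KiB) exceeds the 8 KiB `max_batch_size`, so entries are built as multiple-part entries.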
+ assert_matches!(all_entries[0], Entry::MultiplePart(_));
+ all_entries.shuffle(&mut rand::thread_rng());
+
+ let response = logstore.append_batch(all_entries.clone()).await.unwrap();
+ // 5 regions
+ assert_eq!(response.last_entry_ids.len(), 5);
+ let got_entries = logstore
+ .read(&provider, 0)
+ .await
+ .unwrap()
+ .try_collect::<Vec<_>>()
+ .await
+ .unwrap()
+ .into_iter()
+ .flatten()
+ .collect::<Vec<_>>();
+ for (region_id, _) in region_entries {
+ let expected_entries = all_entries
+ .iter()
+ .filter(|entry| entry.region_id() == region_id)
+ .cloned()
+ .collect::<Vec<_>>();
+ let mut actual_entries = got_entries
+ .iter()
+ .filter(|entry| entry.region_id() == region_id)
+ .cloned()
+ .collect::<Vec<_>>();
+ actual_entries
+ .iter_mut()
+ .for_each(|entry| entry.set_entry_id(0));
+ assert_eq!(expected_entries, actual_entries);
}
}
-
- /// Appends entries for one region and checks all entries can be read successfully.
- #[tokio::test]
- async fn test_one_region() {
- test_with("test_one_region", 1, 1, 1, true, false).await;
- }
-
- /// Appends entries for multiple regions and checks entries for each region can be read successfully.
- /// A topic is assigned only a single region.
- #[tokio::test]
- async fn test_multi_regions_disjoint() {
- test_with("test_multi_regions_disjoint", 5, 5, 1, true, false).await;
- }
-
- /// Appends entries for multiple regions and checks entries for each region can be read successfully.
- /// A topic is assigned multiple regions.
- #[tokio::test]
- async fn test_multi_regions_overlapped() {
- test_with("test_multi_regions_overlapped", 5, 20, 1, true, false).await;
- }
-
- /// Appends entries for multiple regions and checks entries for each region can be read successfully.
- /// A topic may be assigned multiple regions. The append operation repeats for a several iterations.
- /// Each append operation will only append entries for a subset of randomly chosen regions.
- #[tokio::test]
- async fn test_multi_appends() {
- test_with("test_multi_appends", 5, 20, 3, false, false).await;
- }
-
- /// Appends large entries for multiple regions and checks entries for each region can be read successfully.
- /// A topic may be assigned multiple regions.
- #[tokio::test]
- async fn test_append_large_entries() {
- test_with("test_append_large_entries", 5, 20, 3, true, true).await;
- }
}
diff --git a/src/log-store/src/kafka/util/record.rs b/src/log-store/src/kafka/util/record.rs
index e2035318c4c7..fa6f77171645 100644
--- a/src/log-store/src/kafka/util/record.rs
+++ b/src/log-store/src/kafka/util/record.rs
@@ -13,10 +13,14 @@
// limitations under the License.
use std::collections::HashMap;
+use std::sync::Arc;
use rskafka::record::Record as KafkaRecord;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
+use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry};
+use store_api::logstore::provider::{KafkaProvider, Provider};
+use store_api::storage::RegionId;
use crate::error::{
DecodeJsonSnafu, EmptyEntriesSnafu, EncodeJsonSnafu, GetClientSnafu, IllegalSequenceSnafu,
@@ -24,7 +28,7 @@ use crate::error::{
};
use crate::kafka::client_manager::ClientManagerRef;
use crate::kafka::util::offset::Offset;
-use crate::kafka::{EntryId, EntryImpl, NamespaceImpl};
+use crate::kafka::{EntryId, NamespaceImpl};
use crate::metrics;
/// The current version of Record.
@@ -32,7 +36,10 @@ pub(crate) const VERSION: u32 = 0;
/// The estimated size in bytes of a serialized RecordMeta.
/// A record is guaranteed to have sizeof(meta) + sizeof(data) <= max_batch_size - ESTIMATED_META_SIZE.
-const ESTIMATED_META_SIZE: usize = 256;
+pub(crate) const ESTIMATED_META_SIZE: usize = 256;
+
+/// The minimum batch size
+pub(crate) const MIN_BATCH_SIZE: usize = 4 * 1024;
/// The type of a record.
///
@@ -110,43 +117,25 @@ impl TryFrom<KafkaRecord> for Record {
}
}
-impl From<Vec<Record>> for EntryImpl {
- fn from(records: Vec<Record>) -> Self {
- let entry_id = records[0].meta.entry_id;
- let ns = records[0].meta.ns.clone();
- let data = records.into_iter().flat_map(|record| record.data).collect();
- EntryImpl {
- data,
- id: entry_id,
- ns,
- }
- }
-}
-
/// Produces a record to a kafka topic.
pub(crate) struct RecordProducer {
- /// The namespace of the entries.
- ns: NamespaceImpl,
+ /// The provider of the entries.
+ provider: Arc<KafkaProvider>,
/// Entries are buffered before being built into a record.
- entries: Vec<EntryImpl>,
+ entries: Vec<Entry>,
}
impl RecordProducer {
/// Creates a new producer for producing entries with the given namespace.
- pub(crate) fn new(ns: NamespaceImpl) -> Self {
+ pub(crate) fn new(provider: Arc<KafkaProvider>) -> Self {
Self {
- ns,
+ provider,
entries: Vec::new(),
}
}
- /// Populates the entry buffer with the given entries.
- pub(crate) fn with_entries(self, entries: Vec<EntryImpl>) -> Self {
- Self { entries, ..self }
- }
-
/// Pushes an entry into the entry buffer.
- pub(crate) fn push(&mut self, entry: EntryImpl) {
+ pub(crate) fn push(&mut self, entry: Entry) {
self.entries.push(entry);
}
@@ -158,11 +147,11 @@ impl RecordProducer {
// Gets the producer in which a record buffer is maintained.
let producer = client_manager
- .get_or_insert(&self.ns.topic)
+ .get_or_insert(&self.provider.topic)
.await
.map_err(|e| {
GetClientSnafu {
- topic: &self.ns.topic,
+ topic: &self.provider.topic,
error: e.to_string(),
}
.build()
@@ -171,10 +160,8 @@ impl RecordProducer {
// Stores the offset of the last successfully produced record.
let mut last_offset = None;
- let max_record_size =
- client_manager.config.max_batch_size.as_bytes() as usize - ESTIMATED_META_SIZE;
for entry in self.entries {
- for record in build_records(entry, max_record_size) {
+ for record in convert_to_records(entry) {
let kafka_record = KafkaRecord::try_from(record)?;
metrics::METRIC_KAFKA_PRODUCE_RECORD_COUNTS.inc();
@@ -187,9 +174,8 @@ impl RecordProducer {
.await
.map(Offset)
.with_context(|_| ProduceRecordSnafu {
- topic: &self.ns.topic,
+ topic: &self.provider.topic,
size: kafka_record.approximate_size(),
- limit: max_record_size,
})?;
last_offset = Some(offset);
}
@@ -199,100 +185,188 @@ impl RecordProducer {
}
}
-fn record_type(seq: usize, num_records: usize) -> RecordType {
- if seq == 0 {
- RecordType::First
- } else if seq == num_records - 1 {
- RecordType::Last
- } else {
- RecordType::Middle(seq)
- }
-}
-
-fn build_records(entry: EntryImpl, max_record_size: usize) -> Vec<Record> {
- if entry.data.len() <= max_record_size {
- let record = Record {
+fn convert_to_records(entry: Entry) -> Vec<Record> {
+ match entry {
+ Entry::Naive(entry) => vec![Record {
meta: RecordMeta {
version: VERSION,
tp: RecordType::Full,
- entry_id: entry.id,
- ns: entry.ns,
+ // TODO(weny): refactor the record meta.
+ entry_id: 0,
+ ns: NamespaceImpl {
+ region_id: entry.region_id.as_u64(),
+ // TODO(weny): refactor the record meta.
+ topic: String::new(),
+ },
},
data: entry.data,
+ }],
+ Entry::MultiplePart(entry) => {
+ let mut entries = Vec::with_capacity(entry.parts.len());
+
+ for (idx, part) in entry.parts.into_iter().enumerate() {
+ let tp = match entry.headers[idx] {
+ MultiplePartHeader::First => RecordType::First,
+ MultiplePartHeader::Middle(i) => RecordType::Middle(i),
+ MultiplePartHeader::Last => RecordType::Last,
+ };
+ entries.push(Record {
+ meta: RecordMeta {
+ version: VERSION,
+ tp,
+ // TODO(weny): refactor the record meta.
+ entry_id: 0,
+ ns: NamespaceImpl {
+ region_id: entry.region_id.as_u64(),
+ topic: String::new(),
+ },
+ },
+ data: part,
+ })
+ }
+ entries
+ }
+ }
+}
+
+fn convert_to_naive_entry(provider: Arc<KafkaProvider>, record: Record) -> Entry {
+ let region_id = RegionId::from_u64(record.meta.ns.region_id);
+
+ Entry::Naive(NaiveEntry {
+ provider: Provider::Kafka(provider),
+ region_id,
+ // TODO(weny): should be the offset in the topic
+ entry_id: record.meta.entry_id,
+ data: record.data,
+ })
+}
+
+fn convert_to_multiple_entry(
+ provider: Arc<KafkaProvider>,
+ region_id: RegionId,
+ records: Vec<Record>,
+) -> Entry {
+ let mut headers = Vec::with_capacity(records.len());
+ let mut parts = Vec::with_capacity(records.len());
+
+ for record in records {
+ let header = match record.meta.tp {
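+ // `Full` records are converted into naive entries by the caller and never buffered,
+ // so they cannot appear here.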
+ RecordType::Full => unreachable!(),
+ RecordType::First => MultiplePartHeader::First,
+ RecordType::Middle(i) => MultiplePartHeader::Middle(i),
+ RecordType::Last => MultiplePartHeader::Last,
};
- return vec![record];
+ headers.push(header);
+ parts.push(record.data);
}
- let chunks = entry.data.chunks(max_record_size);
- let num_chunks = chunks.len();
- chunks
- .enumerate()
- .map(|(i, chunk)| Record {
- meta: RecordMeta {
- version: VERSION,
- tp: record_type(i, num_chunks),
- entry_id: entry.id,
- ns: entry.ns.clone(),
- },
- data: chunk.to_vec(),
- })
- .collect()
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: Provider::Kafka(provider),
+ region_id,
+ // TODO(weny): should be the offset in the topic
+ entry_id: 0,
+ headers,
+ parts,
+ })
}
-pub fn maybe_emit_entry(
+/// Constructs entries from `buffered_records`
+pub fn remaining_entries(
+ provider: &Arc<KafkaProvider>,
+ buffered_records: &mut HashMap<RegionId, Vec<Record>>,
+) -> Option<Vec<Entry>> {
+ if buffered_records.is_empty() {
+ None
+ } else {
+ let mut entries = Vec::with_capacity(buffered_records.len());
+ for (region_id, records) in buffered_records.drain() {
+ entries.push(convert_to_multiple_entry(
+ provider.clone(),
+ region_id,
+ records,
+ ));
+ }
+ Some(entries)
+ }
+}
+
+/// For an [Entry::Naive] entry:
+/// - Emits it immediately upon receiving a [RecordType::Full] record.
+///
+/// For an [Entry::MultiplePart] entry:
+/// - Emits a complete or incomplete [Entry] when the next record with the same [RegionId] arrives.
+///
+/// **Incomplete Entry:**
+/// If records arrive in one of the following orders, the records in bold are emitted as an incomplete [Entry]:
+/// - **[RecordType::First], [RecordType::Middle]**, [RecordType::First]
+/// - **[RecordType::Middle]**, [RecordType::First]
+/// - **[RecordType::Last]**
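+///
+/// e.g. if a `First` record for a region is already buffered and another `First` record for the
+/// same region arrives, the buffered part is emitted as an incomplete entry and the new record
+/// starts a fresh buffer.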
+pub(crate) fn maybe_emit_entry(
+ provider: &Arc<KafkaProvider>,
record: Record,
- entry_records: &mut HashMap<EntryId, Vec<Record>>,
-) -> Result<Option<EntryImpl>> {
+ buffered_records: &mut HashMap<RegionId, Vec<Record>>,
+) -> Result<Option<Entry>> {
let mut entry = None;
match record.meta.tp {
- RecordType::Full => {
- entry = Some(EntryImpl::from(vec![record]));
- }
+ RecordType::Full => entry = Some(convert_to_naive_entry(provider.clone(), record)),
RecordType::First => {
- ensure!(
- !entry_records.contains_key(&record.meta.entry_id),
- IllegalSequenceSnafu {
- error: "First record must be the first"
- }
- );
- entry_records.insert(record.meta.entry_id, vec![record]);
+ let region_id = record.meta.ns.region_id.into();
+ if let Some(records) = buffered_records.insert(region_id, vec![record]) {
+ // Incomplete entry
+ entry = Some(convert_to_multiple_entry(
+ provider.clone(),
+ region_id,
+ records,
+ ))
+ }
}
RecordType::Middle(seq) => {
- let prefix =
- entry_records
- .get_mut(&record.meta.entry_id)
- .context(IllegalSequenceSnafu {
- error: "Middle record must not be the first",
- })?;
- // Safety: the records are guaranteed not empty if the key exists.
- let last_record = prefix.last().unwrap();
- let legal = match last_record.meta.tp {
- // Legal if this record follows a First record.
- RecordType::First => seq == 1,
- // Legal if this record follows a Middle record just prior to this record.
- RecordType::Middle(last_seq) => last_seq + 1 == seq,
- // Illegal sequence.
- _ => false,
- };
- ensure!(
- legal,
- IllegalSequenceSnafu {
- error: "Illegal prefix for a Middle record"
- }
- );
+ let region_id = record.meta.ns.region_id.into();
+ let records = buffered_records.entry(region_id).or_default();
+
+ // Only validate the sequence when a prefix is already buffered.
+ if !records.is_empty() {
+ // Safety: the records are guaranteed not empty if the key exists.
+ let last_record = records.last().unwrap();
+ let legal = match last_record.meta.tp {
+ // Legal if this record follows a First record.
+ RecordType::First => seq == 1,
+ // Legal if this record follows a Middle record just prior to this record.
+ RecordType::Middle(last_seq) => last_seq + 1 == seq,
+ // Illegal sequence.
+ _ => false,
+ };
+ ensure!(
+ legal,
+ IllegalSequenceSnafu {
+ error: format!(
+ "Illegal sequence of a middle record, last record: {:?}, incoming record: {:?}",
+ last_record.meta.tp,
+ record.meta.tp
+ )
+ }
+ );
+ }
- prefix.push(record);
+ records.push(record);
}
RecordType::Last => {
- // There must have a sequence prefix before a Last record is read.
- let mut records =
- entry_records
- .remove(&record.meta.entry_id)
- .context(IllegalSequenceSnafu {
- error: "Missing prefix for a Last record",
- })?;
- records.push(record);
- entry = Some(EntryImpl::from(records));
+ let region_id = record.meta.ns.region_id.into();
+ if let Some(mut records) = buffered_records.remove(®ion_id) {
+ records.push(record);
+ entry = Some(convert_to_multiple_entry(
+ provider.clone(),
+ region_id,
+ records,
+ ))
+ } else {
+ // Incomplete entry
+ entry = Some(convert_to_multiple_entry(
+ provider.clone(),
+ region_id,
+ vec![record],
+ ))
+ }
}
}
Ok(entry)
@@ -300,278 +374,141 @@ pub fn maybe_emit_entry(
#[cfg(test)]
mod tests {
+ use std::assert_matches::assert_matches;
use std::sync::Arc;
- use common_base::readable_size::ReadableSize;
- use common_wal::config::kafka::DatanodeKafkaConfig;
- use common_wal::test_util::run_test_with_kafka_wal;
- use uuid::Uuid;
-
use super::*;
- use crate::kafka::client_manager::ClientManager;
-
- // Implements some utility methods for testing.
- impl Default for Record {
- fn default() -> Self {
- Self {
- meta: RecordMeta {
- version: VERSION,
- tp: RecordType::Full,
- ns: NamespaceImpl {
- region_id: 0,
- topic: "greptimedb_wal_topic".to_string(),
- },
- entry_id: 0,
- },
- data: Vec::new(),
- }
- }
- }
-
- impl Record {
- /// Overrides tp.
- fn with_tp(&self, tp: RecordType) -> Self {
- Self {
- meta: RecordMeta {
- tp,
- ..self.meta.clone()
- },
- ..self.clone()
- }
- }
-
- /// Overrides data with the given data.
- fn with_data(&self, data: &[u8]) -> Self {
- Self {
- data: data.to_vec(),
- ..self.clone()
- }
- }
-
- /// Overrides entry id.
- fn with_entry_id(&self, entry_id: EntryId) -> Self {
- Self {
- meta: RecordMeta {
- entry_id,
- ..self.meta.clone()
- },
- ..self.clone()
- }
- }
-
- /// Overrides namespace.
- fn with_ns(&self, ns: NamespaceImpl) -> Self {
- Self {
- meta: RecordMeta { ns, ..self.meta },
- ..self.clone()
- }
- }
- }
-
- fn new_test_entry<D: AsRef<[u8]>>(data: D, entry_id: EntryId, ns: NamespaceImpl) -> EntryImpl {
- EntryImpl {
- data: data.as_ref().to_vec(),
- id: entry_id,
- ns,
- }
- }
-
- /// Tests that the `build_records` works as expected.
- #[test]
- fn test_build_records() {
- let max_record_size = 128;
-
- // On a small entry.
- let ns = NamespaceImpl {
- region_id: 1,
- topic: "greptimedb_wal_topic".to_string(),
- };
- let entry = new_test_entry([b'1'; 100], 0, ns.clone());
- let records = build_records(entry.clone(), max_record_size);
- assert!(records.len() == 1);
- assert_eq!(entry.data, records[0].data);
-
- // On a large entry.
- let entry = new_test_entry([b'1'; 150], 0, ns.clone());
- let records = build_records(entry.clone(), max_record_size);
- assert!(records.len() == 2);
- assert_eq!(&records[0].data, &[b'1'; 128]);
- assert_eq!(&records[1].data, &[b'1'; 22]);
-
- // On a way-too large entry.
- let entry = new_test_entry([b'1'; 5000], 0, ns.clone());
- let records = build_records(entry.clone(), max_record_size);
- let matched = entry
- .data
- .chunks(max_record_size)
- .enumerate()
- .all(|(i, chunk)| records[i].data == chunk);
- assert!(matched);
- }
+ use crate::error;
- /// Tests that Record and KafkaRecord are able to be converted back and forth.
- #[test]
- fn test_record_conversion() {
- let record = Record {
+ fn new_test_record(tp: RecordType, entry_id: EntryId, region_id: u64, data: Vec<u8>) -> Record {
+ Record {
meta: RecordMeta {
version: VERSION,
- tp: RecordType::Full,
- entry_id: 1,
+ tp,
ns: NamespaceImpl {
- region_id: 1,
+ region_id,
topic: "greptimedb_wal_topic".to_string(),
},
+ entry_id,
},
- data: b"12345".to_vec(),
- };
- let kafka_record: KafkaRecord = record.clone().try_into().unwrap();
- let got = Record::try_from(kafka_record).unwrap();
- assert_eq!(record, got);
+ data,
+ }
}
- /// Tests that the reconstruction of an entry works as expected.
#[test]
- fn test_reconstruct_entry() {
- let template = Record::default();
- let records = vec![
- template.with_data(b"111").with_tp(RecordType::First),
- template.with_data(b"222").with_tp(RecordType::Middle(1)),
- template.with_data(b"333").with_tp(RecordType::Last),
- ];
- let entry = EntryImpl::from(records.clone());
- assert_eq!(records[0].meta.entry_id, entry.id);
- assert_eq!(records[0].meta.ns, entry.ns);
+ fn test_maybe_emit_entry_emit_naive_entry() {
+ let provider = Arc::new(KafkaProvider::new("my_topic".to_string()));
+ let region_id = RegionId::new(1, 1);
+ let mut buffer = HashMap::new();
+ let record = new_test_record(RecordType::Full, 1, region_id.as_u64(), vec![1; 100]);
+ let entry = maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .unwrap();
assert_eq!(
- entry.data,
- records
- .into_iter()
- .flat_map(|record| record.data)
- .collect::<Vec<_>>()
+ entry,
+ Entry::Naive(NaiveEntry {
+ provider: Provider::Kafka(provider),
+ region_id,
+ entry_id: 1,
+ data: vec![1; 100]
+ })
);
}
- /// Tests that `maybe_emit_entry` works as expected.
- /// This test does not check for illegal record sequences since they're already tested in the `test_check_records` test.
#[test]
- fn test_maybe_emit_entry() {
- let ns = NamespaceImpl {
- region_id: 1,
- topic: "greptimedb_wal_topic".to_string(),
- };
- let template = Record::default().with_ns(ns);
- let mut entry_records = HashMap::from([
- (
- 1,
- vec![template.with_entry_id(1).with_tp(RecordType::First)],
- ),
- (
- 2,
- vec![template.with_entry_id(2).with_tp(RecordType::First)],
- ),
- (
- 3,
- vec![
- template.with_entry_id(3).with_tp(RecordType::First),
- template.with_entry_id(3).with_tp(RecordType::Middle(1)),
- ],
- ),
- ]);
-
- // A Full record arrives.
- let got = maybe_emit_entry(
- template.with_entry_id(0).with_tp(RecordType::Full),
- &mut entry_records,
- )
- .unwrap();
- assert!(got.is_some());
-
- // A First record arrives with no prefix.
- let got = maybe_emit_entry(
- template.with_entry_id(0).with_tp(RecordType::First),
- &mut entry_records,
- )
- .unwrap();
- assert!(got.is_none());
-
- // A First record arrives with some prefix.
- let got = maybe_emit_entry(
- template.with_entry_id(1).with_tp(RecordType::First),
- &mut entry_records,
- );
- assert!(got.is_err());
-
- // A Middle record arrives with legal prefix (First).
- let got = maybe_emit_entry(
- template.with_entry_id(2).with_tp(RecordType::Middle(1)),
- &mut entry_records,
- )
- .unwrap();
- assert!(got.is_none());
-
- // A Middle record arrives with legal prefix (Middle).
- let got = maybe_emit_entry(
- template.with_entry_id(2).with_tp(RecordType::Middle(2)),
- &mut entry_records,
- )
- .unwrap();
- assert!(got.is_none());
-
- // A Middle record arrives with illegal prefix.
- let got = maybe_emit_entry(
- template.with_entry_id(2).with_tp(RecordType::Middle(1)),
- &mut entry_records,
+ fn test_maybe_emit_entry_emit_incomplete_entry() {
+ let provider = Arc::new(KafkaProvider::new("my_topic".to_string()));
+ let region_id = RegionId::new(1, 1);
+ // A `First` record overwrites a buffered `First`
+ let mut buffer = HashMap::new();
+ let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]);
+ assert!(maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .is_none());
+ let record = new_test_record(RecordType::First, 2, region_id.as_u64(), vec![2; 100]);
+ let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .unwrap();
+
+ assert_eq!(
+ incomplete_entry,
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: Provider::Kafka(provider.clone()),
+ region_id,
+ // TODO(weny): entry_id is always 0 for now.
+ entry_id: 0,
+ headers: vec![MultiplePartHeader::First],
+ parts: vec![vec![1; 100]],
+ })
);
- assert!(got.is_err());
- // A Middle record arrives with no prefix.
- let got = maybe_emit_entry(
- template.with_entry_id(22).with_tp(RecordType::Middle(1)),
- &mut entry_records,
+ // A `Last` record arrives with nothing buffered
+ let mut buffer = HashMap::new();
+ let record = new_test_record(RecordType::Last, 1, region_id.as_u64(), vec![1; 100]);
+ let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .unwrap();
+
+ assert_eq!(
+ incomplete_entry,
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: Provider::Kafka(provider.clone()),
+ region_id,
+ // TODO(weny): entry_id is always 0 for now.
+ entry_id: 0,
+ headers: vec![MultiplePartHeader::Last],
+ parts: vec![vec![1; 100]],
+ })
);
- assert!(got.is_err());
- // A Last record arrives with no prefix.
- let got = maybe_emit_entry(
- template.with_entry_id(33).with_tp(RecordType::Last),
- &mut entry_records,
+ // A `First` record overwrites a buffered `Middle(0)`
+ let mut buffer = HashMap::new();
+ let record = new_test_record(RecordType::Middle(0), 1, region_id.as_u64(), vec![1; 100]);
+ assert!(maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .is_none());
+ let record = new_test_record(RecordType::First, 2, region_id.as_u64(), vec![2; 100]);
+ let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .unwrap();
+
+ assert_eq!(
+ incomplete_entry,
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: Provider::Kafka(provider),
+ region_id,
+ // TODO(weny): entry_id is always 0 for now.
+ entry_id: 0,
+ headers: vec![MultiplePartHeader::Middle(0)],
+ parts: vec![vec![1; 100]],
+ })
);
- assert!(got.is_err());
-
- // A Last record arrives with legal prefix.
- let got = maybe_emit_entry(
- template.with_entry_id(3).with_tp(RecordType::Last),
- &mut entry_records,
- )
- .unwrap();
- assert!(got.is_some());
-
- // Check state.
- assert_eq!(entry_records.len(), 3);
- assert_eq!(entry_records[&0].len(), 1);
- assert_eq!(entry_records[&1].len(), 1);
- assert_eq!(entry_records[&2].len(), 3);
}
- #[tokio::test]
- async fn test_produce_large_entry() {
- run_test_with_kafka_wal(|broker_endpoints| {
- Box::pin(async {
- let topic = format!("greptimedb_wal_topic_{}", Uuid::new_v4());
- let ns = NamespaceImpl {
- region_id: 1,
- topic,
- };
- let entry = new_test_entry([b'1'; 2000000], 0, ns.clone());
- let producer = RecordProducer::new(ns.clone()).with_entries(vec![entry]);
- let config = DatanodeKafkaConfig {
- broker_endpoints,
- max_batch_size: ReadableSize::mb(1),
- ..Default::default()
- };
- let manager = Arc::new(ClientManager::try_new(&config).await.unwrap());
- producer.produce(&manager).await.unwrap();
- })
- })
- .await
+ #[test]
+ fn test_maybe_emit_entry_illegal_seq() {
+ let provider = Arc::new(KafkaProvider::new("my_topic".to_string()));
+ let region_id = RegionId::new(1, 1);
+ let mut buffer = HashMap::new();
+ let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]);
+ assert!(maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .is_none());
+ let record = new_test_record(RecordType::Middle(2), 1, region_id.as_u64(), vec![2; 100]);
+ let err = maybe_emit_entry(&provider, record, &mut buffer).unwrap_err();
+ assert_matches!(err, error::Error::IllegalSequence { .. });
+
+ let mut buffer = HashMap::new();
+ let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]);
+ assert!(maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .is_none());
+ let record = new_test_record(RecordType::Middle(1), 1, region_id.as_u64(), vec![2; 100]);
+ assert!(maybe_emit_entry(&provider, record, &mut buffer)
+ .unwrap()
+ .is_none());
+ let record = new_test_record(RecordType::Middle(3), 1, region_id.as_u64(), vec![2; 100]);
+ let err = maybe_emit_entry(&provider, record, &mut buffer).unwrap_err();
+ assert_matches!(err, error::Error::IllegalSequence { .. });
}
}
diff --git a/src/log-store/src/lib.rs b/src/log-store/src/lib.rs
index c035e5fcff80..a119aac390c2 100644
--- a/src/log-store/src/lib.rs
+++ b/src/log-store/src/lib.rs
@@ -14,12 +14,10 @@
#![feature(let_chains)]
#![feature(io_error_more)]
+#![feature(assert_matches)]
pub mod error;
pub mod kafka;
pub mod metrics;
-mod noop;
pub mod raft_engine;
pub mod test_util;
-
-pub use noop::NoopLogStore;
diff --git a/src/log-store/src/noop.rs b/src/log-store/src/noop.rs
deleted file mode 100644
index e5ed7fd66bd2..000000000000
--- a/src/log-store/src/noop.rs
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use common_wal::options::WalOptions;
-use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
-use store_api::logstore::namespace::{Id as NamespaceId, Namespace};
-use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
-use store_api::storage::RegionId;
-
-use crate::error::{Error, Result};
-
-/// A noop log store which only for test
-#[derive(Debug, Default)]
-pub struct NoopLogStore;
-
-#[derive(Debug, Default, Clone, PartialEq)]
-pub struct EntryImpl;
-
-#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
-pub struct NamespaceImpl;
-
-impl Namespace for NamespaceImpl {
- fn id(&self) -> NamespaceId {
- 0
- }
-}
-
-impl Entry for EntryImpl {
- fn into_raw_entry(self) -> RawEntry {
- RawEntry {
- region_id: self.region_id(),
- entry_id: self.id(),
- data: vec![],
- }
- }
-
- fn data(&self) -> &[u8] {
- &[]
- }
-
- fn id(&self) -> EntryId {
- 0
- }
-
- fn region_id(&self) -> RegionId {
- RegionId::from_u64(0)
- }
-
- fn estimated_size(&self) -> usize {
- 0
- }
-}
-
-#[async_trait::async_trait]
-impl LogStore for NoopLogStore {
- type Error = Error;
- type Namespace = NamespaceImpl;
- type Entry = EntryImpl;
-
- async fn stop(&self) -> Result<()> {
- Ok(())
- }
-
- async fn append(&self, mut _e: Self::Entry) -> Result<AppendResponse> {
- Ok(AppendResponse::default())
- }
-
- async fn append_batch(&self, _e: Vec<Self::Entry>) -> Result<AppendBatchResponse> {
- Ok(AppendBatchResponse::default())
- }
-
- async fn read(
- &self,
- _ns: &Self::Namespace,
- _entry_id: EntryId,
- ) -> Result<store_api::logstore::entry_stream::SendableEntryStream<'_, Self::Entry, Self::Error>>
- {
- Ok(Box::pin(futures::stream::once(futures::future::ready(Ok(
- vec![],
- )))))
- }
-
- async fn create_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
- Ok(())
- }
-
- async fn delete_namespace(&self, _ns: &Self::Namespace) -> Result<()> {
- Ok(())
- }
-
- async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
- Ok(vec![])
- }
-
- fn entry(&self, data: &mut Vec<u8>, entry_id: EntryId, ns: Self::Namespace) -> Self::Entry {
- let _ = data;
- let _ = entry_id;
- let _ = ns;
- EntryImpl
- }
-
- fn namespace(&self, ns_id: NamespaceId, wal_options: &WalOptions) -> Self::Namespace {
- let _ = ns_id;
- let _ = wal_options;
- NamespaceImpl
- }
-
- async fn obsolete(
- &self,
- ns: Self::Namespace,
- entry_id: EntryId,
- ) -> std::result::Result<(), Self::Error> {
- let _ = ns;
- let _ = entry_id;
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_mock_entry() {
- let e = EntryImpl;
- assert_eq!(0, e.data().len());
- assert_eq!(0, e.id());
- }
-
- #[tokio::test]
- async fn test_noop_logstore() {
- let store = NoopLogStore;
- let e = store.entry(&mut vec![], 1, NamespaceImpl);
- let _ = store.append(e.clone()).await.unwrap();
- assert!(store.append_batch(vec![e]).await.is_ok());
- store.create_namespace(&NamespaceImpl).await.unwrap();
- assert_eq!(0, store.list_namespaces().await.unwrap().len());
- store.delete_namespace(&NamespaceImpl).await.unwrap();
- assert_eq!(NamespaceImpl, store.namespace(0, &WalOptions::default()));
- store.obsolete(NamespaceImpl, 1).await.unwrap();
- }
-}
diff --git a/src/log-store/src/raft_engine.rs b/src/log-store/src/raft_engine.rs
index cdb600249caa..86a46bb1a02f 100644
--- a/src/log-store/src/raft_engine.rs
+++ b/src/log-store/src/raft_engine.rs
@@ -12,20 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::hash::{Hash, Hasher};
-use std::mem::size_of;
-
-use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
-use store_api::logstore::namespace::{Id as NamespaceId, Namespace};
-use store_api::storage::RegionId;
-
-use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl};
+use crate::raft_engine::protos::logstore::EntryImpl;
mod backend;
pub mod log_store;
pub use backend::RaftEngineBackend;
pub use raft_engine::Config;
+use store_api::logstore::entry::{Entry, NaiveEntry};
+use store_api::logstore::provider::Provider;
+use store_api::storage::RegionId;
pub mod protos {
include!(concat!(env!("OUT_DIR"), concat!("/", "protos/", "mod.rs")));
@@ -42,65 +38,20 @@ impl EntryImpl {
}
}
-impl NamespaceImpl {
- pub fn with_id(id: NamespaceId) -> Self {
- Self {
+impl From<EntryImpl> for Entry {
+ fn from(
+ EntryImpl {
id,
- ..Default::default()
- }
- }
-}
-
-#[allow(clippy::derived_hash_with_manual_eq)]
-impl Hash for NamespaceImpl {
- fn hash<H: Hasher>(&self, state: &mut H) {
- self.id.hash(state);
- }
-}
-
-impl Eq for NamespaceImpl {}
-
-impl Namespace for NamespaceImpl {
- fn id(&self) -> NamespaceId {
- self.id
- }
-}
-
-impl Entry for EntryImpl {
- fn into_raw_entry(self) -> RawEntry {
- RawEntry {
- region_id: self.region_id(),
- entry_id: self.id(),
- data: self.data,
- }
- }
-
- fn data(&self) -> &[u8] {
- self.data.as_slice()
- }
-
- fn id(&self) -> EntryId {
- self.id
- }
-
- fn region_id(&self) -> RegionId {
- RegionId::from_u64(self.id)
- }
-
- fn estimated_size(&self) -> usize {
- self.data.len() + size_of::<u64>() + size_of::<u64>()
- }
-}
-
-#[cfg(test)]
-mod tests {
- use store_api::logstore::entry::Entry;
-
- use crate::raft_engine::protos::logstore::EntryImpl;
-
- #[test]
- fn test_estimated_size() {
- let entry = EntryImpl::create(1, 1, b"hello, world".to_vec());
- assert_eq!(28, entry.estimated_size());
+ namespace_id,
+ data,
+ ..
+ }: EntryImpl,
+ ) -> Self {
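+ // For the raft-engine backend, the namespace id doubles as the region id.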
+ Entry::Naive(NaiveEntry {
+ provider: Provider::raft_engine_provider(namespace_id),
+ region_id: RegionId::from_u64(namespace_id),
+ entry_id: id,
+ data,
+ })
}
}
diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs
index b2070abbf3ec..c9632e6ea341 100644
--- a/src/log-store/src/raft_engine/log_store.rs
+++ b/src/log-store/src/raft_engine/log_store.rs
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::hash_map::Entry;
-use std::collections::HashMap;
+use std::collections::{hash_map, HashMap};
use std::fmt::{Debug, Formatter};
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Arc;
@@ -22,22 +21,21 @@ use async_stream::stream;
use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::{error, info};
use common_wal::config::raft_engine::RaftEngineConfig;
-use common_wal::options::WalOptions;
use raft_engine::{Config, Engine, LogBatch, MessageExt, ReadableSize, RecoveryMode};
-use snafu::{ensure, ResultExt};
-use store_api::logstore::entry::{Entry as EntryTrait, Id as EntryId};
-use store_api::logstore::entry_stream::SendableEntryStream;
-use store_api::logstore::namespace::{Id as NamespaceId, Namespace as NamespaceTrait};
-use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
+use snafu::{ensure, OptionExt, ResultExt};
+use store_api::logstore::entry::{Entry, Id as EntryId, NaiveEntry};
+use store_api::logstore::provider::{Provider, RaftEngineProvider};
+use store_api::logstore::{AppendBatchResponse, LogStore, SendableEntryStream};
+use store_api::storage::RegionId;
use crate::error::{
AddEntryLogBatchSnafu, DiscontinuousLogIndexSnafu, Error, FetchEntrySnafu,
- IllegalNamespaceSnafu, IllegalStateSnafu, OverrideCompactedEntrySnafu, RaftEngineSnafu, Result,
- StartGcTaskSnafu, StopGcTaskSnafu,
+ IllegalNamespaceSnafu, IllegalStateSnafu, InvalidProviderSnafu, OverrideCompactedEntrySnafu,
+ RaftEngineSnafu, Result, StartGcTaskSnafu, StopGcTaskSnafu,
};
use crate::metrics;
use crate::raft_engine::backend::SYSTEM_NAMESPACE;
-use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl as Namespace};
+use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl};
const NAMESPACE_PREFIX: &str = "$sys/";
@@ -117,10 +115,10 @@ impl RaftEngineLogStore {
.context(StartGcTaskSnafu)
}
- fn span(&self, namespace: &<Self as LogStore>::Namespace) -> (Option<u64>, Option<u64>) {
+ fn span(&self, provider: &RaftEngineProvider) -> (Option<u64>, Option<u64>) {
(
- self.engine.first_index(namespace.id()),
- self.engine.last_index(namespace.id()),
+ self.engine.first_index(provider.id),
+ self.engine.last_index(provider.id),
)
}
@@ -129,56 +127,65 @@ impl RaftEngineLogStore {
/// to append in each namespace(region).
fn entries_to_batch(
&self,
- entries: Vec<EntryImpl>,
- ) -> Result<(LogBatch, HashMap<NamespaceId, EntryId>)> {
+ entries: Vec<Entry>,
+ ) -> Result<(LogBatch, HashMap<RegionId, EntryId>)> {
// Records the last entry id for each region's entries.
- let mut entry_ids: HashMap<NamespaceId, EntryId> = HashMap::with_capacity(entries.len());
+ let mut entry_ids: HashMap<RegionId, EntryId> = HashMap::with_capacity(entries.len());
let mut batch = LogBatch::with_capacity(entries.len());
for e in entries {
- let ns_id = e.namespace_id;
- match entry_ids.entry(ns_id) {
- Entry::Occupied(mut o) => {
+ let region_id = e.region_id();
+ let entry_id = e.entry_id();
+ match entry_ids.entry(region_id) {
+ hash_map::Entry::Occupied(mut o) => {
let prev = *o.get();
ensure!(
- e.id == prev + 1,
+ entry_id == prev + 1,
DiscontinuousLogIndexSnafu {
- region_id: ns_id,
+ region_id,
last_index: prev,
- attempt_index: e.id
+ attempt_index: entry_id
}
);
- o.insert(e.id);
+ o.insert(entry_id);
}
- Entry::Vacant(v) => {
+ hash_map::Entry::Vacant(v) => {
// this entry is the first in batch of given region.
- if let Some(first_index) = self.engine.first_index(ns_id) {
+ if let Some(first_index) = self.engine.first_index(region_id.as_u64()) {
// ensure the first in batch does not override compacted entry.
ensure!(
- e.id > first_index,
+ entry_id > first_index,
OverrideCompactedEntrySnafu {
- namespace: ns_id,
+ namespace: region_id,
first_index,
- attempt_index: e.id,
+ attempt_index: entry_id,
}
);
}
// ensure the first in batch does not form a hole in raft-engine.
- if let Some(last_index) = self.engine.last_index(ns_id) {
+ if let Some(last_index) = self.engine.last_index(region_id.as_u64()) {
ensure!(
- e.id == last_index + 1,
+ entry_id == last_index + 1,
DiscontinuousLogIndexSnafu {
- region_id: ns_id,
+ region_id,
last_index,
- attempt_index: e.id
+ attempt_index: entry_id
}
);
}
- v.insert(e.id);
+ v.insert(entry_id);
}
}
batch
- .add_entries::<MessageType>(ns_id, &[e])
+ .add_entries::<MessageType>(
+ region_id.as_u64(),
+ &[EntryImpl {
+ id: entry_id,
+ namespace_id: region_id.as_u64(),
+ data: e.into_bytes(),
+ ..Default::default()
+ }],
+ )
.context(AddEntryLogBatchSnafu)?;
}
@@ -198,62 +205,19 @@ impl Debug for RaftEngineLogStore {
#[async_trait::async_trait]
impl LogStore for RaftEngineLogStore {
type Error = Error;
- type Namespace = Namespace;
- type Entry = EntryImpl;
async fn stop(&self) -> Result<()> {
self.gc_task.stop().await.context(StopGcTaskSnafu)
}
- /// Appends an entry to logstore. Currently the existence of the entry's namespace is not checked.
- async fn append(&self, e: Self::Entry) -> Result<AppendResponse> {
- ensure!(self.started(), IllegalStateSnafu);
- let entry_id = e.id;
- let namespace_id = e.namespace_id;
- let mut batch = LogBatch::with_capacity(1);
- batch
- .add_entries::<MessageType>(namespace_id, &[e])
- .context(AddEntryLogBatchSnafu)?;
-
- if let Some(first_index) = self.engine.first_index(namespace_id) {
- ensure!(
- entry_id > first_index,
- OverrideCompactedEntrySnafu {
- namespace: namespace_id,
- first_index,
- attempt_index: entry_id,
- }
- );
- }
-
- if let Some(last_index) = self.engine.last_index(namespace_id) {
- ensure!(
- entry_id == last_index + 1,
- DiscontinuousLogIndexSnafu {
- region_id: namespace_id,
- last_index,
- attempt_index: entry_id
- }
- );
- }
-
- let _ = self
- .engine
- .write(&mut batch, self.config.sync_write)
- .context(RaftEngineSnafu)?;
- Ok(AppendResponse {
- last_entry_id: entry_id,
- })
- }
-
/// Appends a batch of entries to logstore. `RaftEngineLogStore` assures the atomicity of
/// batch append.
- async fn append_batch(&self, entries: Vec<Self::Entry>) -> Result<AppendBatchResponse> {
+ async fn append_batch(&self, entries: Vec<Entry>) -> Result<AppendBatchResponse> {
metrics::METRIC_RAFT_ENGINE_APPEND_BATCH_CALLS_TOTAL.inc();
metrics::METRIC_RAFT_ENGINE_APPEND_BATCH_BYTES_TOTAL.inc_by(
entries
.iter()
- .map(EntryTrait::estimated_size)
+ .map(|entry| entry.estimated_size())
.sum::<usize>() as u64,
);
let _timer = metrics::METRIC_RAFT_ENGINE_APPEND_BATCH_ELAPSED.start_timer();
@@ -287,40 +251,47 @@ impl LogStore for RaftEngineLogStore {
/// determined by the current "last index" of the namespace.
async fn read(
&self,
- ns: &Self::Namespace,
+ provider: &Provider,
entry_id: EntryId,
- ) -> Result<SendableEntryStream<'_, Self::Entry, Self::Error>> {
+ ) -> Result<SendableEntryStream<'static, Entry, Self::Error>> {
+ let ns = provider
+ .as_raft_engine_provider()
+ .with_context(|| InvalidProviderSnafu {
+ expected: RaftEngineProvider::type_name(),
+ actual: provider.type_name(),
+ })?;
+ let namespace_id = ns.id;
metrics::METRIC_RAFT_ENGINE_READ_CALLS_TOTAL.inc();
let _timer = metrics::METRIC_RAFT_ENGINE_READ_ELAPSED.start_timer();
ensure!(self.started(), IllegalStateSnafu);
let engine = self.engine.clone();
- let last_index = engine.last_index(ns.id()).unwrap_or(0);
- let mut start_index = entry_id.max(engine.first_index(ns.id()).unwrap_or(last_index + 1));
+ let last_index = engine.last_index(namespace_id).unwrap_or(0);
+ let mut start_index =
+ entry_id.max(engine.first_index(namespace_id).unwrap_or(last_index + 1));
info!(
"Read logstore, namespace: {}, start: {}, span: {:?}",
- ns.id(),
+ namespace_id,
entry_id,
self.span(ns)
);
let max_batch_size = self.config.read_batch_size;
let (tx, mut rx) = tokio::sync::mpsc::channel(max_batch_size);
- let ns = ns.clone();
let _handle = common_runtime::spawn_read(async move {
while start_index <= last_index {
let mut vec = Vec::with_capacity(max_batch_size);
match engine
.fetch_entries_to::<MessageType>(
- ns.id,
+ namespace_id,
start_index,
last_index + 1,
Some(max_batch_size),
&mut vec,
)
.context(FetchEntrySnafu {
- ns: ns.id,
+ ns: namespace_id,
start: start_index,
end: last_index,
max_size: max_batch_size,
@@ -344,22 +315,40 @@ impl LogStore for RaftEngineLogStore {
let s = stream!({
while let Some(res) = rx.recv().await {
- yield res;
+ let res = res?;
+
+ yield Ok(res.into_iter().map(Entry::from).collect::<Vec<_>>());
}
});
Ok(Box::pin(s))
}
- async fn create_namespace(&self, ns: &Self::Namespace) -> Result<()> {
+ async fn create_namespace(&self, ns: &Provider) -> Result<()> {
+ let ns = ns
+ .as_raft_engine_provider()
+ .with_context(|| InvalidProviderSnafu {
+ expected: RaftEngineProvider::type_name(),
+ actual: ns.type_name(),
+ })?;
+ let namespace_id = ns.id;
ensure!(
- ns.id != SYSTEM_NAMESPACE,
- IllegalNamespaceSnafu { ns: ns.id }
+ namespace_id != SYSTEM_NAMESPACE,
+ IllegalNamespaceSnafu { ns: namespace_id }
);
ensure!(self.started(), IllegalStateSnafu);
- let key = format!("{}{}", NAMESPACE_PREFIX, ns.id).as_bytes().to_vec();
+ let key = format!("{}{}", NAMESPACE_PREFIX, namespace_id)
+ .as_bytes()
+ .to_vec();
let mut batch = LogBatch::with_capacity(1);
batch
- .put_message::<Namespace>(SYSTEM_NAMESPACE, key, ns)
+ .put_message::<NamespaceImpl>(
+ SYSTEM_NAMESPACE,
+ key,
+ &NamespaceImpl {
+ id: namespace_id,
+ ..Default::default()
+ },
+ )
.context(RaftEngineSnafu)?;
let _ = self
.engine
@@ -368,13 +357,22 @@ impl LogStore for RaftEngineLogStore {
Ok(())
}
- async fn delete_namespace(&self, ns: &Self::Namespace) -> Result<()> {
+ async fn delete_namespace(&self, ns: &Provider) -> Result<()> {
+ let ns = ns
+ .as_raft_engine_provider()
+ .with_context(|| InvalidProviderSnafu {
+ expected: RaftEngineProvider::type_name(),
+ actual: ns.type_name(),
+ })?;
+ let namespace_id = ns.id;
ensure!(
- ns.id != SYSTEM_NAMESPACE,
- IllegalNamespaceSnafu { ns: ns.id }
+ namespace_id != SYSTEM_NAMESPACE,
+ IllegalNamespaceSnafu { ns: namespace_id }
);
ensure!(self.started(), IllegalStateSnafu);
- let key = format!("{}{}", NAMESPACE_PREFIX, ns.id).as_bytes().to_vec();
+ let key = format!("{}{}", NAMESPACE_PREFIX, namespace_id)
+ .as_bytes()
+ .to_vec();
let mut batch = LogBatch::with_capacity(1);
batch.delete(SYSTEM_NAMESPACE, key);
let _ = self
@@ -384,17 +382,17 @@ impl LogStore for RaftEngineLogStore {
Ok(())
}
- async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
+ async fn list_namespaces(&self) -> Result<Vec<Provider>> {
ensure!(self.started(), IllegalStateSnafu);
- let mut namespaces: Vec<Namespace> = vec![];
+ let mut namespaces: Vec<Provider> = vec![];
self.engine
- .scan_messages::<Namespace, _>(
+ .scan_messages::<NamespaceImpl, _>(
SYSTEM_NAMESPACE,
Some(NAMESPACE_PREFIX.as_bytes()),
None,
false,
|_, v| {
- namespaces.push(v);
+ namespaces.push(Provider::RaftEngine(RaftEngineProvider { id: v.id }));
true
},
)
@@ -402,32 +400,41 @@ impl LogStore for RaftEngineLogStore {
Ok(namespaces)
}
- fn entry(&self, data: &mut Vec<u8>, entry_id: EntryId, ns: Self::Namespace) -> Self::Entry {
- EntryImpl {
- id: entry_id,
+ fn entry(
+ &self,
+ data: &mut Vec<u8>,
+ entry_id: EntryId,
+ region_id: RegionId,
+ provider: &Provider,
+ ) -> Result<Entry> {
+ debug_assert_eq!(
+ provider.as_raft_engine_provider().unwrap().id,
+ region_id.as_u64()
+ );
+ Ok(Entry::Naive(NaiveEntry {
+ provider: provider.clone(),
+ region_id,
+ entry_id,
data: std::mem::take(data),
- namespace_id: ns.id(),
- ..Default::default()
- }
+ }))
}
- fn namespace(&self, ns_id: NamespaceId, wal_options: &WalOptions) -> Self::Namespace {
- let _ = wal_options;
- Namespace {
- id: ns_id,
- ..Default::default()
- }
- }
-
- async fn obsolete(&self, ns: Self::Namespace, entry_id: EntryId) -> Result<()> {
+ async fn obsolete(&self, provider: &Provider, entry_id: EntryId) -> Result<()> {
+ let ns = provider
+ .as_raft_engine_provider()
+ .with_context(|| InvalidProviderSnafu {
+ expected: RaftEngineProvider::type_name(),
+ actual: provider.type_name(),
+ })?;
+ let namespace_id = ns.id;
ensure!(self.started(), IllegalStateSnafu);
- let obsoleted = self.engine.compact_to(ns.id(), entry_id + 1);
+ let obsoleted = self.engine.compact_to(namespace_id, entry_id + 1);
info!(
"Namespace {} obsoleted {} entries, compacted index: {}, span: {:?}",
- ns.id(),
+ namespace_id,
obsoleted,
entry_id,
- self.span(&ns)
+ self.span(ns)
);
Ok(())
}
@@ -444,6 +451,19 @@ impl MessageExt for MessageType {
}
}
+#[cfg(test)]
+impl RaftEngineLogStore {
+ /// Appends a single entry by wrapping it in a batch and returns the id of the last
+ /// successfully written entry of its region.
+ async fn append(&self, entry: Entry) -> Result<store_api::logstore::AppendResponse> {
+ let response = self.append_batch(vec![entry]).await?;
+ if let Some((_, last_entry_id)) = response.last_entry_ids.into_iter().next() {
+ return Ok(store_api::logstore::AppendResponse { last_entry_id });
+ }
+ unreachable!()
+ }
+}
+
#[cfg(test)]
mod tests {
use std::collections::HashSet;
@@ -453,14 +473,12 @@ mod tests {
use common_telemetry::debug;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
use futures_util::StreamExt;
- use store_api::logstore::entry_stream::SendableEntryStream;
- use store_api::logstore::namespace::Namespace as NamespaceTrait;
- use store_api::logstore::LogStore;
+ use store_api::logstore::{LogStore, SendableEntryStream};
use super::*;
use crate::error::Error;
use crate::raft_engine::log_store::RaftEngineLogStore;
- use crate::raft_engine::protos::logstore::{EntryImpl as Entry, NamespaceImpl as Namespace};
+ use crate::raft_engine::protos::logstore::EntryImpl;
#[tokio::test]
async fn test_open_logstore() {
@@ -487,15 +505,15 @@ mod tests {
assert!(logstore.list_namespaces().await.unwrap().is_empty());
logstore
- .create_namespace(&Namespace::with_id(42))
+ .create_namespace(&Provider::raft_engine_provider(42))
.await
.unwrap();
let namespaces = logstore.list_namespaces().await.unwrap();
assert_eq!(1, namespaces.len());
- assert_eq!(Namespace::with_id(42), namespaces[0]);
+ assert_eq!(Provider::raft_engine_provider(42), namespaces[0]);
logstore
- .delete_namespace(&Namespace::with_id(42))
+ .delete_namespace(&Provider::raft_engine_provider(42))
.await
.unwrap();
assert!(logstore.list_namespaces().await.unwrap().is_empty());
@@ -511,24 +529,25 @@ mod tests {
.await
.unwrap();
- let namespace = Namespace::with_id(1);
+ let namespace_id = 1;
let cnt = 1024;
for i in 0..cnt {
let response = logstore
- .append(Entry::create(
- i,
- namespace.id,
- i.to_string().as_bytes().to_vec(),
- ))
+ .append(
+ EntryImpl::create(i, namespace_id, i.to_string().as_bytes().to_vec()).into(),
+ )
.await
.unwrap();
assert_eq!(i, response.last_entry_id);
}
let mut entries = HashSet::with_capacity(1024);
- let mut s = logstore.read(&Namespace::with_id(1), 0).await.unwrap();
+ let mut s = logstore
+ .read(&Provider::raft_engine_provider(1), 0)
+ .await
+ .unwrap();
while let Some(r) = s.next().await {
let vec = r.unwrap();
- entries.extend(vec.into_iter().map(|e| e.id));
+ entries.extend(vec.into_iter().map(|e| e.entry_id()));
}
assert_eq!((0..cnt).collect::<HashSet<_>>(), entries);
}
@@ -552,11 +571,11 @@ mod tests {
.await
.unwrap();
assert!(logstore
- .append(Entry::create(1, 1, "1".as_bytes().to_vec()))
+ .append(EntryImpl::create(1, 1, "1".as_bytes().to_vec()).into())
.await
.is_ok());
let entries = logstore
- .read(&Namespace::with_id(1), 1)
+ .read(&Provider::raft_engine_provider(1), 1)
.await
.unwrap()
.collect::<Vec<_>>()
@@ -572,11 +591,16 @@ mod tests {
.await
.unwrap();
- let entries =
- collect_entries(logstore.read(&Namespace::with_id(1), 1).await.unwrap()).await;
+ let entries = collect_entries(
+ logstore
+ .read(&Provider::raft_engine_provider(1), 1)
+ .await
+ .unwrap(),
+ )
+ .await;
assert_eq!(1, entries.len());
- assert_eq!(1, entries[0].id);
- assert_eq!(1, entries[0].namespace_id);
+ assert_eq!(1, entries[0].entry_id());
+ assert_eq!(1, entries[0].region_id().as_u64());
}
async fn wal_dir_usage(path: impl AsRef<str>) -> usize {
@@ -615,14 +639,15 @@ mod tests {
let dir = create_temp_dir("raft-engine-logstore-test");
let logstore = new_test_log_store(&dir).await;
- let namespace = Namespace::with_id(42);
+ let namespace_id = 42;
+ let namespace = Provider::raft_engine_provider(namespace_id);
for id in 0..4096 {
- let entry = Entry::create(id, namespace.id(), [b'x'; 4096].to_vec());
+ let entry = EntryImpl::create(id, namespace_id, [b'x'; 4096].to_vec()).into();
let _ = logstore.append(entry).await.unwrap();
}
let before_purge = wal_dir_usage(dir.path().to_str().unwrap()).await;
- logstore.obsolete(namespace, 4000).await.unwrap();
+ logstore.obsolete(&namespace, 4000).await.unwrap();
tokio::time::sleep(Duration::from_secs(6)).await;
let after_purge = wal_dir_usage(dir.path().to_str().unwrap()).await;
@@ -639,19 +664,20 @@ mod tests {
let dir = create_temp_dir("raft-engine-logstore-test");
let logstore = new_test_log_store(&dir).await;
- let namespace = Namespace::with_id(42);
+ let namespace_id = 42;
+ let namespace = Provider::raft_engine_provider(namespace_id);
for id in 0..1024 {
- let entry = Entry::create(id, namespace.id(), [b'x'; 4096].to_vec());
+ let entry = EntryImpl::create(id, namespace_id, [b'x'; 4096].to_vec()).into();
let _ = logstore.append(entry).await.unwrap();
}
- logstore.obsolete(namespace.clone(), 100).await.unwrap();
- assert_eq!(101, logstore.engine.first_index(namespace.id).unwrap());
+ logstore.obsolete(&namespace, 100).await.unwrap();
+ assert_eq!(101, logstore.engine.first_index(namespace_id).unwrap());
let res = logstore.read(&namespace, 100).await.unwrap();
let mut vec = collect_entries(res).await;
- vec.sort_by(|a, b| a.id.partial_cmp(&b.id).unwrap());
- assert_eq!(101, vec.first().unwrap().id);
+ vec.sort_by(|a, b| a.entry_id().partial_cmp(&b.entry_id()).unwrap());
+ assert_eq!(101, vec.first().unwrap().entry_id());
}
#[tokio::test]
@@ -663,14 +689,14 @@ mod tests {
let entries = (0..8)
.flat_map(|ns_id| {
let data = [ns_id as u8].repeat(4096);
- (0..16).map(move |idx| Entry::create(idx, ns_id, data.clone()))
+ (0..16).map(move |idx| EntryImpl::create(idx, ns_id, data.clone()).into())
})
.collect();
logstore.append_batch(entries).await.unwrap();
for ns_id in 0..8 {
- let namespace = Namespace::with_id(ns_id);
- let (first, last) = logstore.span(&namespace);
+ let namespace = &RaftEngineProvider::new(ns_id);
+ let (first, last) = logstore.span(namespace);
assert_eq!(0, first.unwrap());
assert_eq!(15, last.unwrap());
}
@@ -681,19 +707,24 @@ mod tests {
common_telemetry::init_default_ut_logging();
let dir = create_temp_dir("logstore-append-batch-test");
let logstore = new_test_log_store(&dir).await;
-
let entries = vec![
- Entry::create(0, 0, [b'0'; 4096].to_vec()),
- Entry::create(1, 0, [b'0'; 4096].to_vec()),
- Entry::create(0, 1, [b'1'; 4096].to_vec()),
- Entry::create(2, 0, [b'0'; 4096].to_vec()),
- Entry::create(1, 1, [b'1'; 4096].to_vec()),
+ EntryImpl::create(0, 0, [b'0'; 4096].to_vec()).into(),
+ EntryImpl::create(1, 0, [b'0'; 4096].to_vec()).into(),
+ EntryImpl::create(0, 1, [b'1'; 4096].to_vec()).into(),
+ EntryImpl::create(2, 0, [b'0'; 4096].to_vec()).into(),
+ EntryImpl::create(1, 1, [b'1'; 4096].to_vec()).into(),
];
logstore.append_batch(entries).await.unwrap();
- assert_eq!((Some(0), Some(2)), logstore.span(&Namespace::with_id(0)));
- assert_eq!((Some(0), Some(1)), logstore.span(&Namespace::with_id(1)));
+ assert_eq!(
+ (Some(0), Some(2)),
+ logstore.span(&RaftEngineProvider::new(0))
+ );
+ assert_eq!(
+ (Some(0), Some(1)),
+ logstore.span(&RaftEngineProvider::new(1))
+ );
}
#[tokio::test]
@@ -704,21 +735,21 @@ mod tests {
let entries = vec![
// Entry[0] from region 0.
- Entry::create(0, 0, [b'0'; 4096].to_vec()),
+ EntryImpl::create(0, 0, [b'0'; 4096].to_vec()).into(),
// Entry[0] from region 1.
- Entry::create(0, 1, [b'1'; 4096].to_vec()),
+ EntryImpl::create(0, 1, [b'1'; 4096].to_vec()).into(),
// Entry[1] from region 1.
- Entry::create(1, 0, [b'1'; 4096].to_vec()),
+ EntryImpl::create(1, 0, [b'1'; 4096].to_vec()).into(),
// Entry[1] from region 0.
- Entry::create(1, 1, [b'0'; 4096].to_vec()),
+ EntryImpl::create(1, 1, [b'0'; 4096].to_vec()).into(),
// Entry[2] from region 2.
- Entry::create(2, 2, [b'2'; 4096].to_vec()),
+ EntryImpl::create(2, 2, [b'2'; 4096].to_vec()).into(),
];
// Ensure the last entry id returned for each region is the expected one.
let last_entry_ids = logstore.append_batch(entries).await.unwrap().last_entry_ids;
- assert_eq!(last_entry_ids[&0], 1);
- assert_eq!(last_entry_ids[&1], 1);
- assert_eq!(last_entry_ids[&2], 2);
+ assert_eq!(last_entry_ids[&(0.into())], 1);
+ assert_eq!(last_entry_ids[&(1.into())], 1);
+ assert_eq!(last_entry_ids[&(2.into())], 2);
}
}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 400284fdf124..7d523c4168fe 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -27,6 +27,7 @@ use datatypes::prelude::ConcreteDataType;
use object_store::ErrorKind;
use prost::{DecodeError, EncodeError};
use snafu::{Location, Snafu};
+use store_api::logstore::provider::Provider;
use store_api::manifest::ManifestVersion;
use store_api::storage::RegionId;
@@ -226,6 +227,14 @@ pub enum Error {
source: datatypes::Error,
},
+ #[snafu(display("Failed to build entry, region_id: {}", region_id))]
+ BuildEntry {
+ region_id: RegionId,
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
+
#[snafu(display("Failed to encode WAL entry, region_id: {}", region_id))]
EncodeWal {
region_id: RegionId,
@@ -242,17 +251,9 @@ pub enum Error {
source: BoxedError,
},
- #[snafu(display("Failed to read WAL, region_id: {}", region_id))]
+ #[snafu(display("Failed to read WAL, provider: {}", provider))]
ReadWal {
- region_id: RegionId,
- #[snafu(implicit)]
- location: Location,
- source: BoxedError,
- },
-
- #[snafu(display("Failed to read WAL, topic: {}", topic))]
- ReadKafkaWal {
- topic: String,
+ provider: Provider,
#[snafu(implicit)]
location: Location,
source: BoxedError,
@@ -636,6 +637,13 @@ pub enum Error {
unexpected_entry_id: u64,
},
+ #[snafu(display("Read the corrupted log entry, region_id: {}", region_id))]
+ CorruptedEntry {
+ region_id: RegionId,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display(
"Failed to upload file, region_id: {}, file_id: {}, file_type: {:?}",
region_id,
@@ -757,7 +765,6 @@ impl ErrorExt for Error {
| ReadParquet { .. }
| WriteWal { .. }
| ReadWal { .. }
- | ReadKafkaWal { .. }
| DeleteWal { .. } => StatusCode::StorageUnavailable,
CompressObject { .. }
| DecompressObject { .. }
@@ -789,8 +796,10 @@ impl ErrorExt for Error {
| WorkerStopped { .. }
| Recv { .. }
| EncodeWal { .. }
- | DecodeWal { .. } => StatusCode::Internal,
+ | DecodeWal { .. }
+ | BuildEntry { .. } => StatusCode::Internal,
OpenRegion { source, .. } => source.status_code(),
+
WriteBuffer { source, .. } => source.status_code(),
WriteGroup { source, .. } => source.status_code(),
FieldTypeMismatch { source, .. } => source.status_code(),
@@ -837,7 +846,9 @@ impl ErrorExt for Error {
Upload { .. } => StatusCode::StorageUnavailable,
BiError { .. } => StatusCode::Internal,
- EncodeMemtable { .. } | ReadDataPart { .. } => StatusCode::Internal,
+ EncodeMemtable { .. } | ReadDataPart { .. } | CorruptedEntry { .. } => {
+ StatusCode::Internal
+ }
ChecksumMismatch { .. } => StatusCode::Unexpected,
RegionStopped { .. } => StatusCode::RegionNotReady,
TimeRangePredicateOverflow { .. } => StatusCode::InvalidArguments,
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index c9930d2d04a7..8d776bd36b9f 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -24,9 +24,9 @@ use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::{Arc, RwLock};
use common_telemetry::{error, info, warn};
-use common_wal::options::WalOptions;
use crossbeam_utils::atomic::AtomicCell;
use snafu::{ensure, OptionExt};
+use store_api::logstore::provider::Provider;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::RegionId;
@@ -98,8 +98,8 @@ pub(crate) struct MitoRegion {
pub(crate) manifest_ctx: ManifestContextRef,
/// SST file purger.
pub(crate) file_purger: FilePurgerRef,
- /// Wal options of this region.
- pub(crate) wal_options: WalOptions,
+ /// The provider of log store.
+ pub(crate) provider: Provider,
/// Last flush time in millis.
last_flush_millis: AtomicI64,
/// Provider to get current time.
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 64e333c9c476..8d05063cc3be 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -24,6 +24,7 @@ use futures::StreamExt;
use object_store::manager::ObjectStoreManagerRef;
use object_store::util::{join_dir, normalize_dir};
use snafu::{ensure, OptionExt};
+use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::metadata::{ColumnMetadata, RegionMetadata};
use store_api::storage::{ColumnId, RegionId};
@@ -162,7 +163,7 @@ impl RegionOpener {
}
}
let options = self.options.take().unwrap();
- let wal_options = options.wal_options.clone();
+ let provider = self.provider(&options.wal_options);
let object_store = self.object_store(&options.storage)?.clone();
// Create a manifest manager for this region and writes regions to the manifest file.
@@ -212,7 +213,7 @@ impl RegionOpener {
access_layer,
self.cache_manager,
)),
- wal_options,
+ provider,
last_flush_millis: AtomicI64::new(time_provider.current_time_millis()),
time_provider,
memtable_builder,
@@ -250,6 +251,13 @@ impl RegionOpener {
Ok(region)
}
+ fn provider(&self, wal_options: &WalOptions) -> Provider {
+ match wal_options {
+ WalOptions::RaftEngine => Provider::raft_engine_provider(self.region_id.as_u64()),
+ WalOptions::Kafka(options) => Provider::kafka_provider(options.topic.to_string()),
+ }
+ }
+
/// Tries to open the region and returns `None` if the region directory is empty.
async fn maybe_open<S: LogStore>(
&self,
@@ -257,7 +265,6 @@ impl RegionOpener {
wal: &Wal<S>,
) -> Result<Option<MitoRegion>> {
let region_options = self.options.as_ref().unwrap().clone();
- let wal_options = region_options.wal_options.clone();
let region_manifest_options = self.manifest_options(config, ®ion_options)?;
let Some(manifest_manager) = RegionManifestManager::open(region_manifest_options).await?
@@ -269,6 +276,7 @@ impl RegionOpener {
let metadata = manifest.metadata.clone();
let region_id = self.region_id;
+ let provider = self.provider(®ion_options.wal_options);
let object_store = self.object_store(®ion_options.storage)?.clone();
debug!("Open region {} with options: {:?}", region_id, self.options);
@@ -313,7 +321,7 @@ impl RegionOpener {
);
replay_memtable(
wal,
- &wal_options,
+ &provider,
region_id,
flushed_entry_id,
&version_control,
@@ -338,7 +346,7 @@ impl RegionOpener {
RegionState::ReadOnly,
)),
file_purger,
- wal_options,
+ provider,
last_flush_millis: AtomicI64::new(time_provider.current_time_millis()),
time_provider,
memtable_builder,
@@ -430,7 +438,7 @@ pub(crate) fn check_recovered_region(
/// Replays the mutations from WAL and inserts mutations to memtable of given region.
pub(crate) async fn replay_memtable<S: LogStore>(
wal: &Wal<S>,
- wal_options: &WalOptions,
+ provider: &Provider,
region_id: RegionId,
flushed_entry_id: EntryId,
version_control: &VersionControlRef,
@@ -442,7 +450,7 @@ pub(crate) async fn replay_memtable<S: LogStore>(
let mut last_entry_id = flushed_entry_id;
let replay_from_entry_id = flushed_entry_id + 1;
- let mut wal_stream = wal.scan(region_id, replay_from_entry_id, wal_options)?;
+ let mut wal_stream = wal.scan(region_id, replay_from_entry_id, provider)?;
while let Some(res) = wal_stream.next().await {
let (entry_id, entry) = res?;
if entry_id <= flushed_entry_id {
@@ -459,7 +467,7 @@ pub(crate) async fn replay_memtable<S: LogStore>(
last_entry_id = last_entry_id.max(entry_id);
let mut region_write_ctx =
- RegionWriteCtx::new(region_id, version_control, wal_options.clone());
+ RegionWriteCtx::new(region_id, version_control, provider.clone());
for mutation in entry.mutations {
rows_replayed += mutation
.rows
@@ -474,8 +482,9 @@ pub(crate) async fn replay_memtable<S: LogStore>(
region_write_ctx.write_memtable();
}
- wal.obsolete(region_id, flushed_entry_id, wal_options)
- .await?;
+ // TODO(weny): We need to update `flushed_entry_id` in the region manifest
+ // to avoid reading potentially incomplete entries in the future.
+ wal.obsolete(region_id, flushed_entry_id, provider).await?;
info!(
"Replay WAL for region: {}, rows recovered: {}, last entry id: {}",
diff --git a/src/mito2/src/region_write_ctx.rs b/src/mito2/src/region_write_ctx.rs
index 36b1a0fac67b..e86ff77ca2f1 100644
--- a/src/mito2/src/region_write_ctx.rs
+++ b/src/mito2/src/region_write_ctx.rs
@@ -16,8 +16,8 @@ use std::mem;
use std::sync::Arc;
use api::v1::{Mutation, OpType, Rows, WalEntry};
-use common_wal::options::WalOptions;
use snafu::ResultExt;
+use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::storage::{RegionId, SequenceNumber};
@@ -86,7 +86,7 @@ pub(crate) struct RegionWriteCtx {
/// out of the context to construct the wal entry when we write to the wal.
wal_entry: WalEntry,
/// Wal options of the region being written to.
- wal_options: WalOptions,
+ provider: Provider,
/// Notifiers to send write results to waiters.
///
/// The i-th notify is for i-th mutation.
@@ -106,7 +106,7 @@ impl RegionWriteCtx {
pub(crate) fn new(
region_id: RegionId,
version_control: &VersionControlRef,
- wal_options: WalOptions,
+ provider: Provider,
) -> RegionWriteCtx {
let VersionControlData {
version,
@@ -122,7 +122,7 @@ impl RegionWriteCtx {
next_sequence: committed_sequence + 1,
next_entry_id: last_entry_id + 1,
wal_entry: WalEntry::default(),
- wal_options,
+ provider,
notifiers: Vec::new(),
failed: false,
put_num: 0,
@@ -163,7 +163,7 @@ impl RegionWriteCtx {
self.region_id,
self.next_entry_id,
&self.wal_entry,
- &self.wal_options,
+ &self.provider,
)?;
self.next_entry_id += 1;
Ok(())
diff --git a/src/mito2/src/wal.rs b/src/mito2/src/wal.rs
index 0b3b8282833c..18feb9620473 100644
--- a/src/mito2/src/wal.rs
+++ b/src/mito2/src/wal.rs
@@ -26,20 +26,18 @@ use std::mem;
use std::sync::Arc;
use api::v1::WalEntry;
-use async_stream::try_stream;
use common_error::ext::BoxedError;
-use common_wal::options::WalOptions;
use futures::stream::BoxStream;
-use futures::StreamExt;
use prost::Message;
use snafu::ResultExt;
use store_api::logstore::entry::Entry;
+use store_api::logstore::provider::Provider;
use store_api::logstore::{AppendBatchResponse, LogStore};
use store_api::storage::RegionId;
-use crate::error::{
- DecodeWalSnafu, DeleteWalSnafu, EncodeWalSnafu, ReadWalSnafu, Result, WriteWalSnafu,
-};
+use crate::error::{BuildEntrySnafu, DeleteWalSnafu, EncodeWalSnafu, Result, WriteWalSnafu};
+use crate::wal::raw_entry_reader::{LogStoreRawEntryReader, RegionRawEntryReader};
+use crate::wal::wal_entry_reader::{LogStoreEntryReader, WalEntryReader};
/// WAL entry id.
pub type EntryId = store_api::logstore::entry::Id;
@@ -60,6 +58,10 @@ impl<S> Wal<S> {
pub fn new(store: Arc<S>) -> Self {
Self { store }
}
+
+ pub fn store(&self) -> &Arc<S> {
+ &self.store
+ }
}
impl<S> Clone for Wal<S> {
@@ -77,7 +79,7 @@ impl<S: LogStore> Wal<S> {
store: self.store.clone(),
entries: Vec::new(),
entry_encode_buf: Vec::new(),
- namespaces: HashMap::new(),
+ providers: HashMap::new(),
}
}
@@ -86,29 +88,19 @@ impl<S: LogStore> Wal<S> {
&'a self,
region_id: RegionId,
start_id: EntryId,
- wal_options: &'a WalOptions,
- ) -> Result<WalEntryStream> {
- let stream = try_stream!({
- let namespace = self.store.namespace(region_id.into(), wal_options);
- let mut stream = self
- .store
- .read(&namespace, start_id)
- .await
- .map_err(BoxedError::new)
- .context(ReadWalSnafu { region_id })?;
-
- while let Some(entries) = stream.next().await {
- let entries = entries
- .map_err(BoxedError::new)
- .context(ReadWalSnafu { region_id })?;
-
- for entry in entries {
- yield decode_entry(region_id, entry)?;
- }
+ namespace: &'a Provider,
+ ) -> Result<WalEntryStream<'a>> {
+ match namespace {
+ Provider::RaftEngine(_) => {
+ LogStoreEntryReader::new(LogStoreRawEntryReader::new(self.store.clone()))
+ .read(namespace, start_id)
}
- });
-
- Ok(Box::pin(stream))
+ Provider::Kafka(_) => LogStoreEntryReader::new(RegionRawEntryReader::new(
+ LogStoreRawEntryReader::new(self.store.clone()),
+ region_id,
+ ))
+ .read(namespace, start_id),
+ }
}
/// Mark entries whose ids `<= last_id` as deleted.
@@ -116,37 +108,26 @@ impl<S: LogStore> Wal<S> {
&self,
region_id: RegionId,
last_id: EntryId,
- wal_options: &WalOptions,
+ provider: &Provider,
) -> Result<()> {
- let namespace = self.store.namespace(region_id.into(), wal_options);
self.store
- .obsolete(namespace, last_id)
+ .obsolete(provider, last_id)
.await
.map_err(BoxedError::new)
.context(DeleteWalSnafu { region_id })
}
}
-/// Decode Wal entry from log store.
-fn decode_entry<E: Entry>(region_id: RegionId, entry: E) -> Result<(EntryId, WalEntry)> {
- let entry_id = entry.id();
- let data = entry.data();
-
- let wal_entry = WalEntry::decode(data).context(DecodeWalSnafu { region_id })?;
-
- Ok((entry_id, wal_entry))
-}
-
/// WAL batch writer.
pub struct WalWriter<S: LogStore> {
/// Log store of the WAL.
store: Arc<S>,
/// Entries to write.
- entries: Vec<S::Entry>,
+ entries: Vec<Entry>,
/// Buffer to encode WAL entry.
entry_encode_buf: Vec<u8>,
- /// Namespaces of regions being written into.
- namespaces: HashMap<RegionId, S::Namespace>,
+ /// Providers of regions being written into.
+ providers: HashMap<RegionId, Provider>,
}
impl<S: LogStore> WalWriter<S> {
@@ -156,14 +137,13 @@ impl<S: LogStore> WalWriter<S> {
region_id: RegionId,
entry_id: EntryId,
wal_entry: &WalEntry,
- wal_options: &WalOptions,
+ provider: &Provider,
) -> Result<()> {
- // Gets or inserts with a newly built namespace.
- let namespace = self
- .namespaces
+ // Gets or inserts with a newly built provider.
+ let provider = self
+ .providers
.entry(region_id)
- .or_insert_with(|| self.store.namespace(region_id.into(), wal_options))
- .clone();
+ .or_insert_with(|| provider.clone());
// Encode wal entry to log store entry.
self.entry_encode_buf.clear();
@@ -172,7 +152,9 @@ impl<S: LogStore> WalWriter<S> {
.context(EncodeWalSnafu { region_id })?;
let entry = self
.store
- .entry(&mut self.entry_encode_buf, entry_id, namespace);
+ .entry(&mut self.entry_encode_buf, entry_id, region_id, provider)
+ .map_err(BoxedError::new)
+ .context(BuildEntrySnafu { region_id })?;
self.entries.push(entry);
@@ -272,7 +254,6 @@ mod tests {
async fn test_write_wal() {
let env = WalEnv::new().await;
let wal = env.new_wal();
- let wal_options = WalOptions::default();
let entry = WalEntry {
mutations: vec![
@@ -282,16 +263,34 @@ mod tests {
};
let mut writer = wal.writer();
// Region 1 entry 1.
+ let region_id = RegionId::new(1, 1);
writer
- .add_entry(RegionId::new(1, 1), 1, &entry, &wal_options)
+ .add_entry(
+ region_id,
+ 1,
+ &entry,
+ &Provider::raft_engine_provider(region_id.as_u64()),
+ )
.unwrap();
// Region 2 entry 1.
+ let region_id = RegionId::new(1, 2);
writer
- .add_entry(RegionId::new(1, 2), 1, &entry, &wal_options)
+ .add_entry(
+ region_id,
+ 1,
+ &entry,
+ &Provider::raft_engine_provider(region_id.as_u64()),
+ )
.unwrap();
// Region 1 entry 2.
+ let region_id = RegionId::new(1, 2);
writer
- .add_entry(RegionId::new(1, 1), 2, &entry, &wal_options)
+ .add_entry(
+ region_id,
+ 2,
+ &entry,
+ &Provider::raft_engine_provider(region_id.as_u64()),
+ )
.unwrap();
// Test writing multiple region to wal.
@@ -339,32 +338,33 @@ mod tests {
async fn test_scan_wal() {
let env = WalEnv::new().await;
let wal = env.new_wal();
- let wal_options = WalOptions::default();
let entries = sample_entries();
let (id1, id2) = (RegionId::new(1, 1), RegionId::new(1, 2));
+ let ns1 = Provider::raft_engine_provider(id1.as_u64());
+ let ns2 = Provider::raft_engine_provider(id2.as_u64());
let mut writer = wal.writer();
- writer.add_entry(id1, 1, &entries[0], &wal_options).unwrap();
+ writer.add_entry(id1, 1, &entries[0], &ns1).unwrap();
// Insert one entry into region2. Scan should not return this entry.
- writer.add_entry(id2, 1, &entries[0], &wal_options).unwrap();
- writer.add_entry(id1, 2, &entries[1], &wal_options).unwrap();
- writer.add_entry(id1, 3, &entries[2], &wal_options).unwrap();
- writer.add_entry(id1, 4, &entries[3], &wal_options).unwrap();
+ writer.add_entry(id2, 1, &entries[0], &ns2).unwrap();
+ writer.add_entry(id1, 2, &entries[1], &ns1).unwrap();
+ writer.add_entry(id1, 3, &entries[2], &ns1).unwrap();
+ writer.add_entry(id1, 4, &entries[3], &ns1).unwrap();
writer.write_to_wal().await.unwrap();
// Scan all contents region1
- let stream = wal.scan(id1, 1, &wal_options).unwrap();
+ let stream = wal.scan(id1, 1, &ns1).unwrap();
let actual: Vec<_> = stream.try_collect().await.unwrap();
check_entries(&entries, 1, &actual);
// Scan parts of contents
- let stream = wal.scan(id1, 2, &wal_options).unwrap();
+ let stream = wal.scan(id1, 2, &ns1).unwrap();
let actual: Vec<_> = stream.try_collect().await.unwrap();
check_entries(&entries[1..], 2, &actual);
// Scan out of range
- let stream = wal.scan(id1, 5, &wal_options).unwrap();
+ let stream = wal.scan(id1, 5, &ns1).unwrap();
let actual: Vec<_> = stream.try_collect().await.unwrap();
assert!(actual.is_empty());
}
@@ -373,35 +373,27 @@ mod tests {
async fn test_obsolete_wal() {
let env = WalEnv::new().await;
let wal = env.new_wal();
- let wal_options = WalOptions::default();
let entries = sample_entries();
let mut writer = wal.writer();
let region_id = RegionId::new(1, 1);
- writer
- .add_entry(region_id, 1, &entries[0], &wal_options)
- .unwrap();
- writer
- .add_entry(region_id, 2, &entries[1], &wal_options)
- .unwrap();
- writer
- .add_entry(region_id, 3, &entries[2], &wal_options)
- .unwrap();
+ let ns = Provider::raft_engine_provider(region_id.as_u64());
+ writer.add_entry(region_id, 1, &entries[0], &ns).unwrap();
+ writer.add_entry(region_id, 2, &entries[1], &ns).unwrap();
+ writer.add_entry(region_id, 3, &entries[2], &ns).unwrap();
writer.write_to_wal().await.unwrap();
// Delete 1, 2.
- wal.obsolete(region_id, 2, &wal_options).await.unwrap();
+ wal.obsolete(region_id, 2, &ns).await.unwrap();
// Put 4.
let mut writer = wal.writer();
- writer
- .add_entry(region_id, 4, &entries[3], &wal_options)
- .unwrap();
+ writer.add_entry(region_id, 4, &entries[3], &ns).unwrap();
writer.write_to_wal().await.unwrap();
// Scan all
- let stream = wal.scan(region_id, 1, &wal_options).unwrap();
+ let stream = wal.scan(region_id, 1, &ns).unwrap();
let actual: Vec<_> = stream.try_collect().await.unwrap();
check_entries(&entries[2..], 3, &actual);
}
diff --git a/src/mito2/src/wal/raw_entry_reader.rs b/src/mito2/src/wal/raw_entry_reader.rs
index 57cee5845e50..d8afc7915119 100644
--- a/src/mito2/src/wal/raw_entry_reader.rs
+++ b/src/mito2/src/wal/raw_entry_reader.rs
@@ -20,7 +20,8 @@ use common_wal::options::{KafkaWalOptions, WalOptions};
use futures::stream::BoxStream;
use futures::TryStreamExt;
use snafu::ResultExt;
-use store_api::logstore::entry::{Entry, RawEntry};
+use store_api::logstore::entry::Entry;
+use store_api::logstore::provider::{KafkaProvider, Provider, RaftEngineProvider};
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
use tokio_stream::StreamExt;
@@ -28,38 +29,12 @@ use tokio_stream::StreamExt;
use crate::error::{self, Result};
use crate::wal::EntryId;
-/// A stream that yields [RawEntry].
-pub type RawEntryStream<'a> = BoxStream<'a, Result<RawEntry>>;
+/// A stream that yields [Entry].
+pub type EntryStream<'a> = BoxStream<'a, Result<Entry>>;
-// The namespace of kafka log store
-pub struct KafkaNamespace<'a> {
- topic: &'a str,
-}
-
-// The namespace of raft engine log store
-pub struct RaftEngineNamespace {
- region_id: RegionId,
-}
-
-impl RaftEngineNamespace {
- pub fn new(region_id: RegionId) -> Self {
- Self { region_id }
- }
-}
-
-/// The namespace of [RawEntryReader].
-pub(crate) enum LogStoreNamespace<'a> {
- RaftEngine(RaftEngineNamespace),
- Kafka(KafkaNamespace<'a>),
-}
-
-/// [RawEntryReader] provides the ability to read [RawEntry] from the underlying [LogStore].
+/// [RawEntryReader] provides the ability to read [Entry] from the underlying [LogStore].
pub(crate) trait RawEntryReader: Send + Sync {
- fn read<'a>(
- &'a self,
- ctx: LogStoreNamespace<'a>,
- start_id: EntryId,
- ) -> Result<RawEntryStream<'a>>;
+ fn read(&self, provider: &Provider, start_id: EntryId) -> Result<EntryStream<'static>>;
}
/// Implement the [RawEntryReader] for the [LogStore].
@@ -67,66 +42,35 @@ pub struct LogStoreRawEntryReader<S> {
store: Arc<S>,
}
-impl<S: LogStore> LogStoreRawEntryReader<S> {
+impl<S> LogStoreRawEntryReader<S> {
pub fn new(store: Arc<S>) -> Self {
Self { store }
}
+}
- fn read_region(&self, ns: RaftEngineNamespace, start_id: EntryId) -> Result<RawEntryStream> {
- let region_id = ns.region_id;
- let stream = try_stream!({
- // TODO(weny): refactor the `namespace` method.
- let namespace = self.store.namespace(region_id.into(), &Default::default());
- let mut stream = self
- .store
- .read(&namespace, start_id)
- .await
- .map_err(BoxedError::new)
- .context(error::ReadWalSnafu { region_id })?;
-
- while let Some(entries) = stream.next().await {
- let entries = entries
- .map_err(BoxedError::new)
- .context(error::ReadWalSnafu { region_id })?;
-
- for entry in entries {
- yield entry.into_raw_entry()
- }
- }
- });
-
- Ok(Box::pin(stream))
- }
-
- fn read_topic<'a>(
- &'a self,
- ns: KafkaNamespace<'a>,
- start_id: EntryId,
- ) -> Result<RawEntryStream> {
- let topic = ns.topic;
+impl<S: LogStore> RawEntryReader for LogStoreRawEntryReader<S> {
+ fn read(&self, provider: &Provider, start_id: EntryId) -> Result<EntryStream<'static>> {
+ let store = self.store.clone();
+ let provider = provider.clone();
let stream = try_stream!({
- // TODO(weny): refactor the `namespace` method.
- let namespace = self.store.namespace(
- RegionId::from_u64(0).into(),
- &WalOptions::Kafka(KafkaWalOptions {
- topic: topic.to_string(),
- }),
- );
-
- let mut stream = self
- .store
- .read(&namespace, start_id)
+ let mut stream = store
+ .read(&provider, start_id)
.await
.map_err(BoxedError::new)
- .context(error::ReadKafkaWalSnafu { topic })?;
+ .with_context(|_| error::ReadWalSnafu {
+ provider: provider.clone(),
+ })?;
while let Some(entries) = stream.next().await {
- let entries = entries
- .map_err(BoxedError::new)
- .context(error::ReadKafkaWalSnafu { topic })?;
+ let entries =
+ entries
+ .map_err(BoxedError::new)
+ .with_context(|_| error::ReadWalSnafu {
+ provider: provider.clone(),
+ })?;
for entry in entries {
- yield entry.into_raw_entry()
+ yield entry
}
}
});
@@ -135,53 +79,33 @@ impl<S: LogStore> LogStoreRawEntryReader<S> {
}
}
-impl<S: LogStore> RawEntryReader for LogStoreRawEntryReader<S> {
- fn read<'a>(
- &'a self,
- ctx: LogStoreNamespace<'a>,
- start_id: EntryId,
- ) -> Result<RawEntryStream<'a>> {
- let stream = match ctx {
- LogStoreNamespace::RaftEngine(ns) => self.read_region(ns, start_id)?,
- LogStoreNamespace::Kafka(ns) => self.read_topic(ns, start_id)?,
- };
-
- Ok(Box::pin(stream))
- }
-}
-
-/// A filter implement the [RawEntryReader]
-pub struct RawEntryReaderFilter<R, F> {
+/// A [RawEntryReader] that only reads [Entry]s belonging to a specific region.
+pub struct RegionRawEntryReader<R> {
reader: R,
- filter: F,
+ region_id: RegionId,
}
-impl<R, F> RawEntryReaderFilter<R, F>
+impl<R> RegionRawEntryReader<R>
where
R: RawEntryReader,
- F: Fn(&RawEntry) -> bool + Sync + Send,
{
- pub fn new(reader: R, filter: F) -> Self {
- Self { reader, filter }
+ pub fn new(reader: R, region_id: RegionId) -> Self {
+ Self { reader, region_id }
}
}
-impl<R, F> RawEntryReader for RawEntryReaderFilter<R, F>
+impl<R> RawEntryReader for RegionRawEntryReader<R>
where
R: RawEntryReader,
- F: Fn(&RawEntry) -> bool + Sync + Send,
{
- fn read<'a>(
- &'a self,
- ctx: LogStoreNamespace<'a>,
- start_id: EntryId,
- ) -> Result<RawEntryStream<'a>> {
+ fn read(&self, ctx: &Provider, start_id: EntryId) -> Result<EntryStream<'static>> {
let mut stream = self.reader.read(ctx, start_id)?;
- let filter = &(self.filter);
+ let region_id = self.region_id;
+
let stream = try_stream!({
while let Some(entry) = stream.next().await {
let entry = entry?;
- if filter(&entry) {
+ if entry.region_id() == region_id {
yield entry
}
}
@@ -197,11 +121,9 @@ mod tests {
use common_wal::options::WalOptions;
use futures::stream;
- use store_api::logstore::entry::{Entry, RawEntry};
- use store_api::logstore::entry_stream::SendableEntryStream;
- use store_api::logstore::namespace::Namespace;
+ use store_api::logstore::entry::{Entry, NaiveEntry};
use store_api::logstore::{
- AppendBatchResponse, AppendResponse, EntryId, LogStore, NamespaceId,
+ AppendBatchResponse, AppendResponse, EntryId, LogStore, SendableEntryStream,
};
use store_api::storage::RegionId;
@@ -210,93 +132,79 @@ mod tests {
#[derive(Debug)]
struct MockLogStore {
- entries: Vec<RawEntry>,
- }
-
- #[derive(Debug, Eq, PartialEq, Clone, Copy, Default, Hash)]
- struct MockNamespace;
-
- impl Namespace for MockNamespace {
- fn id(&self) -> NamespaceId {
- 0
- }
+ entries: Vec<Entry>,
}
#[async_trait::async_trait]
impl LogStore for MockLogStore {
- type Entry = RawEntry;
type Error = error::Error;
- type Namespace = MockNamespace;
async fn stop(&self) -> Result<(), Self::Error> {
unreachable!()
}
- async fn append(&self, entry: Self::Entry) -> Result<AppendResponse, Self::Error> {
- unreachable!()
- }
-
async fn append_batch(
&self,
- entries: Vec<Self::Entry>,
+ entries: Vec<Entry>,
) -> Result<AppendBatchResponse, Self::Error> {
unreachable!()
}
async fn read(
&self,
- ns: &Self::Namespace,
+ provider: &Provider,
id: EntryId,
- ) -> Result<SendableEntryStream<Self::Entry, Self::Error>, Self::Error> {
+ ) -> Result<SendableEntryStream<'static, Entry, Self::Error>, Self::Error> {
Ok(Box::pin(stream::iter(vec![Ok(self.entries.clone())])))
}
- async fn create_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error> {
+ async fn create_namespace(&self, ns: &Provider) -> Result<(), Self::Error> {
unreachable!()
}
- async fn delete_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error> {
+ async fn delete_namespace(&self, ns: &Provider) -> Result<(), Self::Error> {
unreachable!()
}
- async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>, Self::Error> {
+ async fn list_namespaces(&self) -> Result<Vec<Provider>, Self::Error> {
unreachable!()
}
async fn obsolete(
&self,
- ns: Self::Namespace,
+ provider: &Provider,
entry_id: EntryId,
) -> Result<(), Self::Error> {
unreachable!()
}
- fn entry(&self, data: &mut Vec<u8>, entry_id: EntryId, ns: Self::Namespace) -> Self::Entry {
+ fn entry(
+ &self,
+ data: &mut Vec<u8>,
+ entry_id: EntryId,
+ region_id: RegionId,
+ provider: &Provider,
+ ) -> Result<Entry, Self::Error> {
unreachable!()
}
-
- fn namespace(&self, _ns_id: NamespaceId, _wal_options: &WalOptions) -> Self::Namespace {
- MockNamespace
- }
}
#[tokio::test]
async fn test_raw_entry_reader() {
- let expected_entries = vec![RawEntry {
+ let provider = Provider::raft_engine_provider(RegionId::new(1024, 1).as_u64());
+ let expected_entries = vec![Entry::Naive(NaiveEntry {
+ provider: provider.clone(),
region_id: RegionId::new(1024, 1),
entry_id: 1,
- data: vec![],
- }];
+ data: vec![1],
+ })];
let store = MockLogStore {
entries: expected_entries.clone(),
};
let reader = LogStoreRawEntryReader::new(Arc::new(store));
let entries = reader
- .read(
- LogStoreNamespace::RaftEngine(RaftEngineNamespace::new(RegionId::new(1024, 1))),
- 0,
- )
+ .read(&provider, 0)
.unwrap()
.try_collect::<Vec<_>>()
.await
@@ -306,37 +214,38 @@ mod tests {
#[tokio::test]
async fn test_raw_entry_reader_filter() {
+ let provider = Provider::raft_engine_provider(RegionId::new(1024, 1).as_u64());
let all_entries = vec![
- RawEntry {
+ Entry::Naive(NaiveEntry {
+ provider: provider.clone(),
region_id: RegionId::new(1024, 1),
entry_id: 1,
data: vec![1],
- },
- RawEntry {
+ }),
+ Entry::Naive(NaiveEntry {
+ provider: provider.clone(),
region_id: RegionId::new(1024, 2),
entry_id: 2,
data: vec![2],
- },
- RawEntry {
+ }),
+ Entry::Naive(NaiveEntry {
+ provider: provider.clone(),
region_id: RegionId::new(1024, 3),
entry_id: 3,
data: vec![3],
- },
+ }),
];
let store = MockLogStore {
entries: all_entries.clone(),
};
let expected_region_id = RegionId::new(1024, 3);
- let reader =
- RawEntryReaderFilter::new(LogStoreRawEntryReader::new(Arc::new(store)), |entry| {
- entry.region_id == expected_region_id
- });
+ let reader = RegionRawEntryReader::new(
+ LogStoreRawEntryReader::new(Arc::new(store)),
+ expected_region_id,
+ );
let entries = reader
- .read(
- LogStoreNamespace::RaftEngine(RaftEngineNamespace::new(RegionId::new(1024, 1))),
- 0,
- )
+ .read(&provider, 0)
.unwrap()
.try_collect::<Vec<_>>()
.await
@@ -344,7 +253,7 @@ mod tests {
assert_eq!(
all_entries
.into_iter()
- .filter(|entry| entry.region_id == expected_region_id)
+ .filter(|entry| entry.region_id() == expected_region_id)
.collect::<Vec<_>>(),
entries
);
diff --git a/src/mito2/src/wal/wal_entry_reader.rs b/src/mito2/src/wal/wal_entry_reader.rs
index 8c3e16122254..82db59540059 100644
--- a/src/mito2/src/wal/wal_entry_reader.rs
+++ b/src/mito2/src/wal/wal_entry_reader.rs
@@ -12,13 +12,183 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use api::v1::WalEntry;
+use common_telemetry::info;
+use futures::StreamExt;
+use prost::Message;
+use snafu::{ensure, ResultExt};
+use store_api::logstore::entry::Entry;
+use store_api::logstore::provider::Provider;
use store_api::storage::RegionId;
-use crate::error::Result;
-use crate::wal::raw_entry_reader::LogStoreNamespace;
+use crate::error::{CorruptedEntrySnafu, DecodeWalSnafu, Result};
+use crate::wal::raw_entry_reader::RawEntryReader;
use crate::wal::{EntryId, WalEntryStream};
-/// [OneshotWalEntryReader] provides the ability to read and decode entries from the underlying store.
-pub(crate) trait OneshotWalEntryReader: Send + Sync {
- fn read(self, ctx: LogStoreNamespace, start_id: EntryId) -> Result<WalEntryStream>;
+pub(crate) fn decode_raw_entry(raw_entry: Entry) -> Result<(EntryId, WalEntry)> {
+ let entry_id = raw_entry.entry_id();
+ let region_id = raw_entry.region_id();
+ ensure!(raw_entry.is_complete(), CorruptedEntrySnafu { region_id });
+ // TODO(weny): implement the [Buf] for return value, avoid extra memory allocation.
+ let bytes = raw_entry.into_bytes();
+ let wal_entry = WalEntry::decode(bytes.as_slice()).context(DecodeWalSnafu { region_id })?;
+
+ Ok((entry_id, wal_entry))
+}
+
+/// [WalEntryReader] provides the ability to read and decode entries from the underlying store.
+pub(crate) trait WalEntryReader: Send + Sync {
+ fn read(self, ns: &'_ Provider, start_id: EntryId) -> Result<WalEntryStream<'static>>;
+}
+
+/// A reader that reads [Entry]s from a [RawEntryReader] and decodes them into [WalEntry]s.
+pub struct LogStoreEntryReader<R> {
+ reader: R,
+}
+
+impl<R> LogStoreEntryReader<R> {
+ pub fn new(reader: R) -> Self {
+ Self { reader }
+ }
+}
+
+impl<R: RawEntryReader> WalEntryReader for LogStoreEntryReader<R> {
+ fn read(self, ns: &'_ Provider, start_id: EntryId) -> Result<WalEntryStream<'static>> {
+ let LogStoreEntryReader { reader } = self;
+ let mut stream = reader.read(ns, start_id)?;
+
+ let stream = async_stream::stream! {
+ let mut buffered_entry = None;
+ while let Some(next_entry) = stream.next().await {
+ match buffered_entry.take() {
+ Some(entry) => {
+ yield decode_raw_entry(entry);
+ buffered_entry = Some(next_entry?);
+ },
+ None => {
+ buffered_entry = Some(next_entry?);
+ }
+ };
+ }
+ if let Some(entry) = buffered_entry {
+            // Ignores corrupted data at the tail.
+ if entry.is_complete() {
+ yield decode_raw_entry(entry);
+ }
+ }
+ };
+
+ Ok(Box::pin(stream))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use api::v1::{Mutation, OpType, WalEntry};
+ use futures::{stream, TryStreamExt};
+ use prost::Message;
+ use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader};
+ use store_api::logstore::provider::Provider;
+ use store_api::storage::RegionId;
+
+ use crate::error::{self, Result};
+ use crate::wal::raw_entry_reader::{EntryStream, RawEntryReader};
+ use crate::wal::wal_entry_reader::{LogStoreEntryReader, WalEntryReader};
+ use crate::wal::EntryId;
+
+ struct MockRawEntryStream {
+ entries: Vec<Entry>,
+ }
+
+ impl RawEntryReader for MockRawEntryStream {
+ fn read(&self, ns: &Provider, start_id: EntryId) -> Result<EntryStream<'static>> {
+ let entries = self.entries.clone().into_iter().map(Ok);
+
+ Ok(Box::pin(stream::iter(entries)))
+ }
+ }
+
+ #[tokio::test]
+ async fn test_tail_corrupted_stream() {
+ common_telemetry::init_default_ut_logging();
+ let provider = Provider::kafka_provider("my_topic".to_string());
+ let wal_entry = WalEntry {
+ mutations: vec![Mutation {
+ op_type: OpType::Put as i32,
+ sequence: 1u64,
+ rows: None,
+ }],
+ };
+ let encoded_entry = wal_entry.encode_to_vec();
+ let parts = encoded_entry
+ .chunks(encoded_entry.len() / 2)
+ .map(Into::into)
+ .collect::<Vec<_>>();
+ let raw_entry_stream = MockRawEntryStream {
+ entries: vec![
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: provider.clone(),
+ region_id: RegionId::new(1, 1),
+ entry_id: 2,
+ headers: vec![MultiplePartHeader::First, MultiplePartHeader::Last],
+ parts,
+ }),
+            // The corrupted data at the tail.
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: provider.clone(),
+ region_id: RegionId::new(1, 1),
+ entry_id: 1,
+ headers: vec![MultiplePartHeader::Last],
+ parts: vec![vec![1; 100]],
+ }),
+ ],
+ };
+
+ let reader = LogStoreEntryReader::new(raw_entry_stream);
+ let entries = reader
+ .read(&provider, 0)
+ .unwrap()
+ .try_collect::<Vec<_>>()
+ .await
+ .unwrap()
+ .into_iter()
+ .map(|(_, entry)| entry)
+ .collect::<Vec<_>>();
+
+ assert_eq!(entries, vec![wal_entry]);
+ }
+
+ #[tokio::test]
+ async fn test_corrupted_stream() {
+ let provider = Provider::kafka_provider("my_topic".to_string());
+ let raw_entry_stream = MockRawEntryStream {
+ entries: vec![
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: provider.clone(),
+ region_id: RegionId::new(1, 1),
+ entry_id: 1,
+ headers: vec![MultiplePartHeader::Last],
+ parts: vec![vec![1; 100]],
+ }),
+ Entry::MultiplePart(MultiplePartEntry {
+ provider: provider.clone(),
+ region_id: RegionId::new(1, 1),
+ entry_id: 2,
+ headers: vec![MultiplePartHeader::First],
+ parts: vec![vec![1; 100]],
+ }),
+ ],
+ };
+
+ let reader = LogStoreEntryReader::new(raw_entry_stream);
+ let err = reader
+ .read(&provider, 0)
+ .unwrap()
+ .try_collect::<Vec<_>>()
+ .await
+ .unwrap_err();
+ assert_matches!(err, error::Error::CorruptedEntry { .. });
+ }
}
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index f6d890dc8ff8..595b6ee56635 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -75,7 +75,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let timer = Instant::now();
let last_entry_id = replay_memtable(
&self.wal,
- ®ion.wal_options,
+ ®ion.provider,
region_id,
flushed_entry_id,
®ion.version_control,
diff --git a/src/mito2/src/worker/handle_flush.rs b/src/mito2/src/worker/handle_flush.rs
index b776f98aaa56..2d1c4b96ca39 100644
--- a/src/mito2/src/worker/handle_flush.rs
+++ b/src/mito2/src/worker/handle_flush.rs
@@ -212,7 +212,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
);
if let Err(e) = self
.wal
- .obsolete(region_id, request.flushed_entry_id, ®ion.wal_options)
+ .obsolete(region_id, request.flushed_entry_id, ®ion.provider)
.await
{
error!(e; "Failed to write wal, region: {}", region_id);
diff --git a/src/mito2/src/worker/handle_truncate.rs b/src/mito2/src/worker/handle_truncate.rs
index f5598286a563..70aca9f6ace4 100644
--- a/src/mito2/src/worker/handle_truncate.rs
+++ b/src/mito2/src/worker/handle_truncate.rs
@@ -82,7 +82,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
.obsolete(
region_id,
truncate_result.truncated_entry_id,
- ®ion.wal_options,
+ ®ion.provider,
)
.await
{
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
index 3614d1be5de2..85ce49f3150f 100644
--- a/src/mito2/src/worker/handle_write.rs
+++ b/src/mito2/src/worker/handle_write.rs
@@ -84,8 +84,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
for (region_id, region_ctx) in region_ctxs.iter_mut() {
// Safety: the log store implementation ensures that either the `write_to_wal` fails and no
// response is returned or the last entry ids for each region do exist.
- let last_entry_id =
- response.last_entry_ids.get(®ion_id.as_u64()).unwrap();
+ let last_entry_id = response.last_entry_ids.get(region_id).unwrap();
region_ctx.set_next_entry_id(last_entry_id + 1);
}
}
@@ -162,7 +161,7 @@ impl<S> RegionWorkerLoop<S> {
let region_ctx = RegionWriteCtx::new(
region.region_id,
®ion.version_control,
- region.wal_options.clone(),
+ region.provider.clone(),
);
e.insert(region_ctx);
diff --git a/src/store-api/src/logstore.rs b/src/store-api/src/logstore.rs
index 33739ac85fb4..347643982716 100644
--- a/src/store-api/src/logstore.rs
+++ b/src/store-api/src/logstore.rs
@@ -14,68 +14,64 @@
//! LogStore APIs.
+pub mod entry;
+pub mod provider;
+
use std::collections::HashMap;
+use std::pin::Pin;
use common_error::ext::ErrorExt;
-use common_wal::options::WalOptions;
+use entry::Entry;
+use futures::Stream;
-use crate::logstore::entry::Entry;
-pub use crate::logstore::entry::Id as EntryId;
-use crate::logstore::entry_stream::SendableEntryStream;
-pub use crate::logstore::namespace::Id as NamespaceId;
-use crate::logstore::namespace::Namespace;
+pub type SendableEntryStream<'a, I, E> = Pin<Box<dyn Stream<Item = Result<Vec<I>, E>> + Send + 'a>>;
-pub mod entry;
-pub mod entry_stream;
-pub mod namespace;
+pub use crate::logstore::entry::Id as EntryId;
+use crate::logstore::provider::Provider;
+use crate::storage::RegionId;
/// `LogStore` serves as a Write-Ahead-Log for storage engine.
#[async_trait::async_trait]
pub trait LogStore: Send + Sync + 'static + std::fmt::Debug {
type Error: ErrorExt + Send + Sync + 'static;
- type Namespace: Namespace;
- type Entry: Entry;
/// Stops components of the logstore.
async fn stop(&self) -> Result<(), Self::Error>;
- /// Appends an entry to the log store and returns a response containing the id of the append entry.
- async fn append(&self, entry: Self::Entry) -> Result<AppendResponse, Self::Error>;
-
/// Appends a batch of entries and returns a response containing a map where the key is a region id
/// while the value is the id of the last successfully written entry of the region.
- async fn append_batch(
- &self,
- entries: Vec<Self::Entry>,
- ) -> Result<AppendBatchResponse, Self::Error>;
+ async fn append_batch(&self, entries: Vec<Entry>) -> Result<AppendBatchResponse, Self::Error>;
/// Creates a new `EntryStream` to asynchronously generates `Entry` with ids
/// starting from `id`.
async fn read(
&self,
- ns: &Self::Namespace,
+ provider: &Provider,
id: EntryId,
- ) -> Result<SendableEntryStream<Self::Entry, Self::Error>, Self::Error>;
+ ) -> Result<SendableEntryStream<'static, Entry, Self::Error>, Self::Error>;
/// Creates a new `Namespace` from the given ref.
- async fn create_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error>;
+ async fn create_namespace(&self, ns: &Provider) -> Result<(), Self::Error>;
/// Deletes an existing `Namespace` specified by the given ref.
- async fn delete_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error>;
+ async fn delete_namespace(&self, ns: &Provider) -> Result<(), Self::Error>;
/// Lists all existing namespaces.
- async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>, Self::Error>;
+ async fn list_namespaces(&self) -> Result<Vec<Provider>, Self::Error>;
/// Marks all entries with ids `<=entry_id` of the given `namespace` as obsolete,
/// so that the log store can safely delete those entries. This method does not guarantee
/// that the obsolete entries are deleted immediately.
- async fn obsolete(&self, ns: Self::Namespace, entry_id: EntryId) -> Result<(), Self::Error>;
+ async fn obsolete(&self, provider: &Provider, entry_id: EntryId) -> Result<(), Self::Error>;
/// Makes an entry instance of the associated Entry type
- fn entry(&self, data: &mut Vec<u8>, entry_id: EntryId, ns: Self::Namespace) -> Self::Entry;
-
- /// Makes a namespace instance of the associated Namespace type
- fn namespace(&self, ns_id: NamespaceId, wal_options: &WalOptions) -> Self::Namespace;
+ fn entry(
+ &self,
+ data: &mut Vec<u8>,
+ entry_id: EntryId,
+ region_id: RegionId,
+ provider: &Provider,
+ ) -> Result<Entry, Self::Error>;
}
/// The response of an `append` operation.
@@ -89,5 +85,5 @@ pub struct AppendResponse {
#[derive(Debug, Default)]
pub struct AppendBatchResponse {
/// Key: region id (as u64). Value: the id of the last successfully written entry of the region.
- pub last_entry_ids: HashMap<u64, EntryId>,
+ pub last_entry_ids: HashMap<RegionId, EntryId>,
}
diff --git a/src/store-api/src/logstore/entry.rs b/src/store-api/src/logstore/entry.rs
index 09daa2e1abb9..8b7f838be17a 100644
--- a/src/store-api/src/logstore/entry.rs
+++ b/src/store-api/src/logstore/entry.rs
@@ -12,58 +12,141 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::mem::size_of;
+
+use crate::logstore::provider::Provider;
use crate::storage::RegionId;
/// An entry's id.
/// Different log store implementations may interpret the id to different meanings.
pub type Id = u64;
-/// The raw Wal entry.
+/// The [Entry::Naive] is used in RaftEngineLogStore and KafkaLogStore.
+///
+/// The [Entry::MultiplePart] contains multiple parts of data split from a large entry; it is used in KafkaLogStore.
#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct RawEntry {
+pub enum Entry {
+ Naive(NaiveEntry),
+ MultiplePart(MultiplePartEntry),
+}
+
+impl Entry {
+ /// Into [NaiveEntry] if it's type of [Entry::Naive].
+ pub fn into_naive_entry(self) -> Option<NaiveEntry> {
+ match self {
+ Entry::Naive(entry) => Some(entry),
+ Entry::MultiplePart(_) => None,
+ }
+ }
+
+ /// Into [MultiplePartEntry] if it's type of [Entry::MultiplePart].
+ pub fn into_multiple_part_entry(self) -> Option<MultiplePartEntry> {
+ match self {
+ Entry::Naive(_) => None,
+ Entry::MultiplePart(entry) => Some(entry),
+ }
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct NaiveEntry {
+ pub provider: Provider,
pub region_id: RegionId,
pub entry_id: Id,
pub data: Vec<u8>,
}
-impl Entry for RawEntry {
- fn into_raw_entry(self) -> RawEntry {
- self
+impl NaiveEntry {
+ fn estimated_size(&self) -> usize {
+ size_of::<Self>() + self.data.capacity() * size_of::<u8>()
}
+}
- fn data(&self) -> &[u8] {
- &self.data
- }
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum MultiplePartHeader {
+ First,
+ Middle(usize),
+ Last,
+}
- fn id(&self) -> Id {
- self.entry_id
- }
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct MultiplePartEntry {
+ pub provider: Provider,
+ pub region_id: RegionId,
+ pub entry_id: Id,
+ pub headers: Vec<MultiplePartHeader>,
+ pub parts: Vec<Vec<u8>>,
+}
- fn region_id(&self) -> RegionId {
- self.region_id
+impl MultiplePartEntry {
+ fn is_complete(&self) -> bool {
+ self.headers.contains(&MultiplePartHeader::First)
+ && self.headers.contains(&MultiplePartHeader::Last)
}
fn estimated_size(&self) -> usize {
- std::mem::size_of_val(self)
+ size_of::<Self>()
+ + self
+ .parts
+ .iter()
+ .map(|data| data.capacity() * size_of::<u8>())
+ .sum::<usize>()
+ + self.headers.capacity() * size_of::<MultiplePartHeader>()
}
}
-/// Entry is the minimal data storage unit through which users interact with the log store.
-/// The log store implementation may have larger or smaller data storage unit than an entry.
-pub trait Entry: Send + Sync {
- /// Consumes [Entry] and converts to [RawEntry].
- fn into_raw_entry(self) -> RawEntry;
+impl Entry {
+ /// Returns the [Provider]
+ pub fn provider(&self) -> &Provider {
+ match self {
+ Entry::Naive(entry) => &entry.provider,
+ Entry::MultiplePart(entry) => &entry.provider,
+ }
+ }
- /// Returns the contained data of the entry.
- fn data(&self) -> &[u8];
+ /// Returns the [RegionId]
+ pub fn region_id(&self) -> RegionId {
+ match self {
+ Entry::Naive(entry) => entry.region_id,
+ Entry::MultiplePart(entry) => entry.region_id,
+ }
+ }
- /// Returns the id of the entry.
- /// Usually the namespace id is identical with the region id.
- fn id(&self) -> Id;
+ /// Returns the [Id]
+ pub fn entry_id(&self) -> Id {
+ match self {
+ Entry::Naive(entry) => entry.entry_id,
+ Entry::MultiplePart(entry) => entry.entry_id,
+ }
+ }
- /// Returns the [RegionId]
- fn region_id(&self) -> RegionId;
+    /// Sets the [Id]
+ pub fn set_entry_id(&mut self, id: Id) {
+ match self {
+ Entry::Naive(entry) => entry.entry_id = id,
+ Entry::MultiplePart(entry) => entry.entry_id = id,
+ }
+ }
+
+ /// Returns true if it's a complete entry.
+ pub fn is_complete(&self) -> bool {
+ match self {
+ Entry::Naive(_) => true,
+ Entry::MultiplePart(entry) => entry.is_complete(),
+ }
+ }
- /// Computes the estimated encoded size.
- fn estimated_size(&self) -> usize;
+ pub fn into_bytes(self) -> Vec<u8> {
+ match self {
+ Entry::Naive(entry) => entry.data,
+ Entry::MultiplePart(entry) => entry.parts.concat(),
+ }
+ }
+
+ pub fn estimated_size(&self) -> usize {
+ match self {
+ Entry::Naive(entry) => entry.estimated_size(),
+ Entry::MultiplePart(entry) => entry.estimated_size(),
+ }
+ }
}
diff --git a/src/store-api/src/logstore/entry_stream.rs b/src/store-api/src/logstore/entry_stream.rs
deleted file mode 100644
index 6a5886b0b53f..000000000000
--- a/src/store-api/src/logstore/entry_stream.rs
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::pin::Pin;
-
-use common_error::ext::ErrorExt;
-use futures::Stream;
-
-use crate::logstore::entry::Entry;
-
-pub trait EntryStream: Stream<Item = Result<Vec<Self::Entry>, Self::Error>> {
- type Error: ErrorExt;
- type Entry: Entry;
-
- fn start_id(&self) -> u64;
-}
-
-pub type SendableEntryStream<'a, I, E> = Pin<Box<dyn Stream<Item = Result<Vec<I>, E>> + Send + 'a>>;
-
-#[cfg(test)]
-mod tests {
- use std::any::Any;
- use std::task::{Context, Poll};
-
- use common_error::ext::StackError;
- use futures::StreamExt;
- use snafu::Snafu;
-
- use super::*;
- pub use crate::logstore::entry::Id;
- use crate::logstore::entry::RawEntry;
- use crate::storage::RegionId;
-
- pub struct SimpleEntry {
- /// Binary data of current entry
- data: Vec<u8>,
- }
-
- #[derive(Debug, Snafu)]
- #[snafu(visibility(pub))]
- pub struct Error {}
-
- impl ErrorExt for Error {
- fn as_any(&self) -> &dyn Any {
- self
- }
- }
-
- impl StackError for Error {
- fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
-
- fn next(&self) -> Option<&dyn StackError> {
- None
- }
- }
-
- impl Entry for SimpleEntry {
- fn into_raw_entry(self) -> RawEntry {
- RawEntry {
- region_id: RegionId::from_u64(0),
- entry_id: 0,
- data: vec![],
- }
- }
-
- fn data(&self) -> &[u8] {
- &self.data
- }
-
- fn id(&self) -> Id {
- 0u64
- }
-
- fn region_id(&self) -> RegionId {
- RegionId::from_u64(0)
- }
-
- fn estimated_size(&self) -> usize {
- self.data.len()
- }
- }
-
- impl SimpleEntry {
- pub fn new(data: impl AsRef<[u8]>) -> Self {
- let data = data.as_ref().to_vec();
- Self { data }
- }
- }
-
- pub struct EntryStreamImpl<'a> {
- inner: SendableEntryStream<'a, SimpleEntry, Error>,
- start_id: u64,
- }
-
- impl<'a> EntryStream for EntryStreamImpl<'a> {
- type Error = Error;
- type Entry = SimpleEntry;
-
- fn start_id(&self) -> u64 {
- self.start_id
- }
- }
-
- impl Stream for EntryStreamImpl<'_> {
- type Item = Result<Vec<SimpleEntry>, Error>;
-
- fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- match Pin::new(&mut self.inner).poll_next(cx) {
- Poll::Ready(Some(v)) => Poll::Ready(Some(v)),
- Poll::Ready(None) => Poll::Ready(None),
- Poll::Pending => Poll::Pending,
- }
- }
- }
-
- #[tokio::test]
- pub async fn test_entry_stream() {
- let stream =
- async_stream::stream!(yield Ok(vec![SimpleEntry::new("test_entry".as_bytes())]));
-
- let mut stream_impl = EntryStreamImpl {
- inner: Box::pin(stream),
- start_id: 1234,
- };
-
- if let Some(v) = stream_impl.next().await {
- let vec = v.unwrap();
- assert_eq!(1, vec.len());
- assert_eq!(b"test_entry", vec[0].data());
- }
- }
-}
diff --git a/src/store-api/src/logstore/namespace.rs b/src/store-api/src/logstore/namespace.rs
deleted file mode 100644
index ac1b62e31bd4..000000000000
--- a/src/store-api/src/logstore/namespace.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::hash::Hash;
-
-/// The namespace id.
-/// Usually the namespace id is identical with the region id.
-pub type Id = u64;
-
-pub trait Namespace: Send + Sync + Clone + std::fmt::Debug + Hash + PartialEq + Eq {
- /// Returns the namespace id.
- fn id(&self) -> Id;
-}
diff --git a/src/store-api/src/logstore/provider.rs b/src/store-api/src/logstore/provider.rs
new file mode 100644
index 000000000000..f893a47df54f
--- /dev/null
+++ b/src/store-api/src/logstore/provider.rs
@@ -0,0 +1,110 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::Display;
+use std::sync::Arc;
+
+use crate::storage::RegionId;
+
+// The Provider of the Kafka log store
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct KafkaProvider {
+ pub topic: String,
+}
+
+impl KafkaProvider {
+ pub fn new(topic: String) -> Self {
+ Self { topic }
+ }
+
+ /// Returns the type name.
+ pub fn type_name() -> &'static str {
+ "KafkaProvider"
+ }
+}
+
+impl Display for KafkaProvider {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.topic)
+ }
+}
+
+// The Provider of the raft-engine log store
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct RaftEngineProvider {
+ pub id: u64,
+}
+
+impl RaftEngineProvider {
+ pub fn new(id: u64) -> Self {
+ Self { id }
+ }
+
+ /// Returns the type name.
+ pub fn type_name() -> &'static str {
+ "RaftEngineProvider"
+ }
+}
+
+/// The Provider of LogStore
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Provider {
+ RaftEngine(RaftEngineProvider),
+ Kafka(Arc<KafkaProvider>),
+}
+
+impl Display for Provider {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match &self {
+ Provider::RaftEngine(provider) => {
+ write!(f, "region: {}", RegionId::from_u64(provider.id))
+ }
+ Provider::Kafka(provider) => write!(f, "topic: {}", provider.topic),
+ }
+ }
+}
+
+impl Provider {
+ pub fn raft_engine_provider(id: u64) -> Provider {
+ Provider::RaftEngine(RaftEngineProvider { id })
+ }
+
+ pub fn kafka_provider(topic: String) -> Provider {
+ Provider::Kafka(Arc::new(KafkaProvider { topic }))
+ }
+
+ /// Returns the type name.
+ pub fn type_name(&self) -> &'static str {
+ match self {
+ Provider::RaftEngine(_) => RaftEngineProvider::type_name(),
+ Provider::Kafka(_) => KafkaProvider::type_name(),
+ }
+ }
+
+    /// Returns a reference to the [`RaftEngineProvider`] if it is the [`Provider::RaftEngine`] variant.
+ pub fn as_raft_engine_provider(&self) -> Option<&RaftEngineProvider> {
+ if let Provider::RaftEngine(ns) = self {
+ return Some(ns);
+ }
+ None
+ }
+
+    /// Returns a reference to the [`KafkaProvider`] if it is the [`Provider::Kafka`] variant.
+ pub fn as_kafka_provider(&self) -> Option<&Arc<KafkaProvider>> {
+ if let Provider::Kafka(ns) = self {
+ return Some(ns);
+ }
+ None
+ }
+}
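Editor's note: the short sketch below is not part of the diff above. It is a minimal illustration of how the new `Provider` enum is meant to be constructed and inspected, based only on the API added in `src/store-api/src/logstore/provider.rs`; the `main` function and the sample topic name are assumptions for illustration.

use store_api::logstore::provider::Provider;
use store_api::storage::RegionId;

fn main() {
    // Raft-engine backed WAL: the provider is keyed by the region id.
    let region_id = RegionId::new(1024, 1);
    let raft = Provider::raft_engine_provider(region_id.as_u64());
    assert_eq!(raft.type_name(), "RaftEngineProvider");

    // Kafka backed WAL: the provider is keyed by the topic name.
    let kafka = Provider::kafka_provider("my_topic".to_string());
    if let Some(kafka_provider) = kafka.as_kafka_provider() {
        assert_eq!(kafka_provider.topic, "my_topic");
    }

    // Both variants implement `Display`, which the new `ReadWal { provider }` error relies on.
    println!("{raft} / {kafka}");
}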
| refactor | remove associated type `Namespace` and `Entry` in `LogStore` (#4038) |
| b227a7637c52060e226adfdd8d8bf2a7c97f1ba6 | 2023-07-19 09:24:49 | Ning Sun | feat: add timers for promql query (#1994) | false |
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index a9cfc31cad32..6c68a62dfc92 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -580,6 +580,7 @@ impl PrometheusHandler for Instance {
query: &PromQuery,
query_ctx: QueryContextRef,
) -> server_error::Result<Output> {
+ let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED);
let interceptor = self
.plugins
.get::<PromQueryInterceptorRef<server_error::Error>>();
diff --git a/src/frontend/src/metrics.rs b/src/frontend/src/metrics.rs
index 31c5de4a64f7..cbfcab962c59 100644
--- a/src/frontend/src/metrics.rs
+++ b/src/frontend/src/metrics.rs
@@ -13,6 +13,7 @@
// limitations under the License.
pub(crate) const METRIC_HANDLE_SQL_ELAPSED: &str = "frontend.handle_sql_elapsed";
+pub(crate) const METRIC_HANDLE_PROMQL_ELAPSED: &str = "frontend.handle_promql_elapsed";
pub(crate) const METRIC_EXEC_PLAN_ELAPSED: &str = "frontend.exec_plan_elapsed";
pub(crate) const METRIC_HANDLE_SCRIPTS_ELAPSED: &str = "frontend.handle_scripts_elapsed";
pub(crate) const METRIC_RUN_SCRIPT_ELAPSED: &str = "frontend.run_script_elapsed";
diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs
index cbeaaa9ab881..8e206059d824 100644
--- a/src/servers/src/metrics.rs
+++ b/src/servers/src/metrics.rs
@@ -46,6 +46,16 @@ pub(crate) const METRIC_HTTP_PROM_STORE_READ_ELAPSED: &str = "servers.http_prome
pub(crate) const METRIC_HTTP_OPENTELEMETRY_ELAPSED: &str = "servers.http_otlp_elapsed";
pub(crate) const METRIC_TCP_OPENTSDB_LINE_WRITE_ELAPSED: &str =
"servers.opentsdb_line_write_elapsed";
+pub(crate) const METRIC_HTTP_PROMQL_INSTANT_QUERY_ELAPSED: &str =
+ "servers.http_promql_instant_query_elapsed";
+pub(crate) const METRIC_HTTP_PROMQL_RANGE_QUERY_ELAPSED: &str =
+ "servers.http_promql_range_query_elapsed";
+pub(crate) const METRIC_HTTP_PROMQL_LABEL_QUERY_ELAPSED: &str =
+ "servers.http_promql_label_query_elapsed";
+pub(crate) const METRIC_HTTP_PROMQL_SERIES_QUERY_ELAPSED: &str =
+ "servers.http_promql_series_query_elapsed";
+pub(crate) const METRIC_HTTP_PROMQL_LABEL_VALUE_QUERY_ELAPSED: &str =
+ "servers.http_promql_label_value_query_elapsed";
pub(crate) const METRIC_MYSQL_CONNECTIONS: &str = "servers.mysql_connection_count";
pub(crate) const METRIC_MYSQL_QUERY_TIMER: &str = "servers.mysql_query_elapsed";
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 293a702a96a7..9c65a243cad2 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -26,7 +26,7 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_query::Output;
use common_recordbatch::RecordBatches;
-use common_telemetry::info;
+use common_telemetry::{info, timer};
use common_time::util::{current_time_rfc3339, yesterday_rfc3339};
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVector;
@@ -91,7 +91,7 @@ impl PrometheusServer {
}
pub fn make_app(&self) -> Router {
- // TODO(ruihang): implement format_query, series, values, query_examplars and targets methods
+ // TODO(ruihang): implement format_query, series, values, query_exemplars and targets methods
let router = Router::new()
.route("/query", routing::post(instant_query).get(instant_query))
@@ -428,6 +428,7 @@ pub async fn instant_query(
Query(params): Query<InstantQuery>,
Form(form_params): Form<InstantQuery>,
) -> Json<PrometheusJsonResponse> {
+ let _timer = timer!(crate::metrics::METRIC_HTTP_PROMQL_INSTANT_QUERY_ELAPSED);
// Extract time from query string, or use current server time if not specified.
let time = params
.time
@@ -471,6 +472,7 @@ pub async fn range_query(
Query(params): Query<RangeQuery>,
Form(form_params): Form<RangeQuery>,
) -> Json<PrometheusJsonResponse> {
+ let _timer = timer!(crate::metrics::METRIC_HTTP_PROMQL_RANGE_QUERY_ELAPSED);
let prom_query = PromQuery {
query: params.query.or(form_params.query).unwrap_or_default(),
start: params.start.or(form_params.start).unwrap_or_default(),
@@ -543,6 +545,7 @@ pub async fn labels_query(
Query(params): Query<LabelsQuery>,
Form(form_params): Form<LabelsQuery>,
) -> Json<PrometheusJsonResponse> {
+ let _timer = timer!(crate::metrics::METRIC_HTTP_PROMQL_LABEL_QUERY_ELAPSED);
let mut queries = params.matches.0;
if queries.is_empty() {
queries = form_params.matches.0;
@@ -779,6 +782,7 @@ pub async fn label_values_query(
Path(label_name): Path<String>,
Query(params): Query<LabelValueQuery>,
) -> Json<PrometheusJsonResponse> {
+ let _timer = timer!(crate::metrics::METRIC_HTTP_PROMQL_LABEL_VALUE_QUERY_ELAPSED);
let queries = params.matches.0;
if queries.is_empty() {
return PrometheusJsonResponse::error("Invalid argument", "match[] parameter is required");
@@ -891,6 +895,7 @@ pub async fn series_query(
Query(params): Query<SeriesQuery>,
Form(form_params): Form<SeriesQuery>,
) -> Json<PrometheusJsonResponse> {
+ let _timer = timer!(crate::metrics::METRIC_HTTP_PROMQL_SERIES_QUERY_ELAPSED);
let mut queries: Vec<String> = params.matches.0;
if queries.is_empty() {
queries = form_params.matches.0;
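Illustrative aside (not part of the diff above): the `let _timer = timer!(...)` lines added in this commit rely on the usual drop-guard pattern, where a guard is created at the start of the handler and the elapsed time is recorded when the guard is dropped at the end of scope. The sketch below shows that pattern with a hypothetical plain-Rust timer that only prints; the real `common_telemetry::timer!` macro records into the named metrics instead, and its internals are an assumption here.

use std::time::Instant;

// A minimal drop-guard timer.
struct Timer {
    name: &'static str,
    start: Instant,
}

impl Timer {
    fn new(name: &'static str) -> Self {
        Self { name, start: Instant::now() }
    }
}

impl Drop for Timer {
    fn drop(&mut self) {
        println!("{} took {:?}", self.name, self.start.elapsed());
    }
}

fn handle_instant_query() {
    // The named binding keeps the guard alive until the end of the function;
    // `let _ = ...` would drop it immediately and measure nothing useful.
    let _timer = Timer::new("servers.http_promql_instant_query_elapsed");
    // ... handle the request; the elapsed time is reported when `_timer` drops.
}

fn main() {
    handle_instant_query();
}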
|
feat
|
add timers for promql query (#1994)
|
fa08085119cc97d06dbc8901c1d3a3c2c7ce19f8
|
2024-02-21 13:39:09
|
tison
|
ci: upgrade actions to node20-based version (#3345)
| false
|
diff --git a/.github/actions/build-linux-artifacts/action.yml b/.github/actions/build-linux-artifacts/action.yml
index 03c7239677c2..26eb4c81c09f 100644
--- a/.github/actions/build-linux-artifacts/action.yml
+++ b/.github/actions/build-linux-artifacts/action.yml
@@ -34,7 +34,7 @@ runs:
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
diff --git a/.github/actions/build-macos-artifacts/action.yml b/.github/actions/build-macos-artifacts/action.yml
index 6b1cf6b03ba8..0b2d42cebb63 100644
--- a/.github/actions/build-macos-artifacts/action.yml
+++ b/.github/actions/build-macos-artifacts/action.yml
@@ -67,7 +67,7 @@ runs:
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
diff --git a/.github/actions/build-windows-artifacts/action.yml b/.github/actions/build-windows-artifacts/action.yml
index 452bc58c6a0a..122d490de3f8 100644
--- a/.github/actions/build-windows-artifacts/action.yml
+++ b/.github/actions/build-windows-artifacts/action.yml
@@ -62,7 +62,7 @@ runs:
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 0abdd391c823..b91f370030f5 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -138,7 +138,7 @@ jobs:
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
- name: Upload sqlness logs
if: always()
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
@@ -168,9 +168,9 @@ jobs:
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
- name: Upload sqlness logs
if: always()
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
- name: sqlness-logs
+ name: sqlness-logs-with-kafka-wal
path: /tmp/greptime-*.log
retention-days: 3
@@ -269,7 +269,7 @@ jobs:
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
@@ -283,7 +283,7 @@ jobs:
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
index 71842844782c..b7e475a7f6a6 100644
--- a/.github/workflows/nightly-ci.yml
+++ b/.github/workflows/nightly-ci.yml
@@ -45,7 +45,7 @@ jobs:
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
|
ci
|
upgrade actions to node20-based version (#3345)
|
2aa6ac5731e5627e170949adf63a7e617f59de7d
|
2023-11-17 08:28:51
|
Yingwen
|
fix: correct memtable cost (#2762)
| false
|
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index 5b6dd7958564..0289d932cd70 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -410,7 +410,6 @@ impl Iterator for Iter {
self.metrics.total_series += 1;
let mut series = series.write().unwrap();
- let start = Instant::now();
if !self.predicate.is_empty()
&& !prune_primary_key(
&self.codec,
|
fix
|
correct memtable cost (#2762)
|
370ec04a9d95f71e7dd4dd7d833bd6373b6e7db8
|
2023-12-13 15:23:35
|
WU Jingdi
|
fix: use linear interpolation to implement range LINEAR fill strategy (#2903)
| false
|
diff --git a/src/query/src/range_select/plan.rs b/src/query/src/range_select/plan.rs
index bedb3242a976..b687b6d93c65 100644
--- a/src/query/src/range_select/plan.rs
+++ b/src/query/src/range_select/plan.rs
@@ -105,9 +105,12 @@ impl Fill {
}
/// The input `data` contains data on a complete time series.
- /// If the filling strategy is `PREV` or `LINEAR`, caller must be ensured that the incoming `data` is ascending time order.
- pub fn apply_fill_strategy(&self, data: &mut [ScalarValue]) -> DfResult<()> {
+    /// If the filling strategy is `PREV` or `LINEAR`, the caller must ensure that the incoming `ts` & `data` are in ascending time order.
+ pub fn apply_fill_strategy(&self, ts: &[i64], data: &mut [ScalarValue]) -> DfResult<()> {
let len = data.len();
+ if *self == Fill::Linear {
+ return Self::fill_linear(ts, data);
+ }
for i in 0..len {
if data[i].is_null() {
match self {
@@ -117,32 +120,101 @@ impl Fill {
data[i] = data[i - 1].clone()
}
}
- Fill::Linear => {
- if 0 < i && i < len - 1 {
- match (&data[i - 1], &data[i + 1]) {
- (ScalarValue::Float64(Some(a)), ScalarValue::Float64(Some(b))) => {
- data[i] = ScalarValue::Float64(Some((a + b) / 2.0));
- }
- (ScalarValue::Float32(Some(a)), ScalarValue::Float32(Some(b))) => {
- data[i] = ScalarValue::Float32(Some((a + b) / 2.0));
- }
- (a, b) => {
- if !a.is_null() && !b.is_null() {
- return Err(DataFusionError::Execution(
- "RangePlan: Apply Fill LINEAR strategy on Non-floating type".to_string()));
- } else {
- continue;
- }
- }
- }
- }
- }
+            // Linear interpolation is relatively involved, so `Fill::Linear`
+            // is handled separately by `Self::fill_linear`.
+ Fill::Linear => unreachable!(),
Fill::Const(v) => data[i] = v.clone(),
}
}
}
Ok(())
}
+
+ fn fill_linear(ts: &[i64], data: &mut [ScalarValue]) -> DfResult<()> {
+ let not_null_num = data
+ .iter()
+ .fold(0, |acc, x| if x.is_null() { acc } else { acc + 1 });
+ // We need at least two non-empty data points to perform linear interpolation
+ if not_null_num < 2 {
+ return Ok(());
+ }
+ let mut index = 0;
+ let mut head: Option<usize> = None;
+ let mut tail: Option<usize> = None;
+ while index < data.len() {
+ // find null interval [start, end)
+ // start is null, end is not-null
+ let start = data[index..]
+ .iter()
+ .position(ScalarValue::is_null)
+ .unwrap_or(data.len() - index)
+ + index;
+ if start == data.len() {
+ break;
+ }
+ let end = data[start..]
+ .iter()
+ .position(|r| !r.is_null())
+ .unwrap_or(data.len() - start)
+ + start;
+ index = end + 1;
+            // leading/trailing null intervals are handled later; record start/end for now
+ if start == 0 {
+ head = Some(end);
+ } else if end == data.len() {
+ tail = Some(start);
+ } else {
+ linear_interpolation(ts, data, start - 1, end, start, end)?;
+ }
+ }
+        // handle the leading null interval
+ if let Some(end) = head {
+ linear_interpolation(ts, data, end, end + 1, 0, end)?;
+ }
+        // handle the trailing null interval
+ if let Some(start) = tail {
+ linear_interpolation(ts, data, start - 2, start - 1, start, data.len())?;
+ }
+ Ok(())
+ }
+}
+
+/// Uses `(ts[i1], data[i1])` and `(ts[i2], data[i2])` as endpoints to linearly interpolate the elements over the interval `[start, end)`.
+fn linear_interpolation(
+ ts: &[i64],
+ data: &mut [ScalarValue],
+ i1: usize,
+ i2: usize,
+ start: usize,
+ end: usize,
+) -> DfResult<()> {
+ let (x0, x1) = (ts[i1] as f64, ts[i2] as f64);
+ let (y0, y1, is_float32) = match (&data[i1], &data[i2]) {
+ (ScalarValue::Float64(Some(y0)), ScalarValue::Float64(Some(y1))) => (*y0, *y1, false),
+ (ScalarValue::Float32(Some(y0)), ScalarValue::Float32(Some(y1))) => {
+ (*y0 as f64, *y1 as f64, true)
+ }
+ _ => {
+ return Err(DataFusionError::Execution(
+ "RangePlan: Apply Fill LINEAR strategy on Non-floating type".to_string(),
+ ));
+ }
+ };
+    // Defensive check to avoid a division-by-zero error.
+ if x1 == x0 {
+ return Err(DataFusionError::Execution(
+ "RangePlan: Linear interpolation using the same coordinate points".to_string(),
+ ));
+ }
+ for i in start..end {
+ let val = y0 + (y1 - y0) / (x1 - x0) * (ts[i] as f64 - x0);
+ data[i] = if is_float32 {
+ ScalarValue::Float32(Some(val as f32))
+ } else {
+ ScalarValue::Float64(Some(val))
+ }
+ }
+ Ok(())
}
#[derive(Eq, Clone, Debug)]
@@ -859,25 +931,16 @@ impl RangeSelectStream {
} in self.series_map.values()
{
// collect data on time series
- if !need_sort_output {
- for (ts, accumulators) in align_ts_accumulator {
- for (i, accumulator) in accumulators.iter().enumerate() {
- all_scalar[i].push(accumulator.evaluate()?);
- }
- ts_builder.append_value(*ts);
- }
- } else {
- let mut keys = align_ts_accumulator.keys().copied().collect::<Vec<_>>();
- keys.sort();
- for key in &keys {
- for (i, accumulator) in
- align_ts_accumulator.get(key).unwrap().iter().enumerate()
- {
- all_scalar[i].push(accumulator.evaluate()?);
- }
+ let mut align_ts = align_ts_accumulator.keys().copied().collect::<Vec<_>>();
+ if need_sort_output {
+ align_ts.sort();
+ }
+ for ts in &align_ts {
+ for (i, accumulator) in align_ts_accumulator.get(ts).unwrap().iter().enumerate() {
+ all_scalar[i].push(accumulator.evaluate()?);
}
- ts_builder.append_slice(&keys);
}
+ ts_builder.append_slice(&align_ts);
// apply fill strategy on time series
for (
i,
@@ -891,7 +954,7 @@ impl RangeSelectStream {
if let Some(data_type) = need_cast {
cast_scalar_values(time_series_data, data_type)?;
}
- fill.apply_fill_strategy(time_series_data)?;
+ fill.apply_fill_strategy(&align_ts, time_series_data)?;
}
by_rows.resize(by_rows.len() + align_ts_accumulator.len(), row.row());
start_index += align_ts_accumulator.len();
@@ -1220,13 +1283,13 @@ mod test {
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
\n| 1.0 | 1.5 | 1970-01-01T00:00:15 | host1 |\
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
- \n| 2.0 | | 1970-01-01T00:00:25 | host1 |\
+ \n| 2.0 | 2.5 | 1970-01-01T00:00:25 | host1 |\
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
\n| 3.0 | 3.5 | 1970-01-01T00:00:05 | host2 |\
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
\n| 4.0 | 4.5 | 1970-01-01T00:00:15 | host2 |\
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
- \n| 5.0 | | 1970-01-01T00:00:25 | host2 |\
+ \n| 5.0 | 5.5 | 1970-01-01T00:00:25 | host2 |\
\n+------------+------------+---------------------+-------+",
);
do_range_select_test(10_000, 5_000, 5_000, Fill::Linear, true, expected).await;
@@ -1266,13 +1329,13 @@ mod test {
\n| 1.0 | 1.0 | 1970-01-01T00:00:10 | host1 |\
\n| 1.0 | 1.5 | 1970-01-01T00:00:15 | host1 |\
\n| 2.0 | 2.0 | 1970-01-01T00:00:20 | host1 |\
- \n| 2.0 | | 1970-01-01T00:00:25 | host1 |\
+ \n| 2.0 | 2.5 | 1970-01-01T00:00:25 | host1 |\
\n| 3.0 | 3.0 | 1970-01-01T00:00:00 | host2 |\
\n| 3.0 | 3.5 | 1970-01-01T00:00:05 | host2 |\
\n| 4.0 | 4.0 | 1970-01-01T00:00:10 | host2 |\
\n| 4.0 | 4.5 | 1970-01-01T00:00:15 | host2 |\
\n| 5.0 | 5.0 | 1970-01-01T00:00:20 | host2 |\
- \n| 5.0 | | 1970-01-01T00:00:25 | host2 |\
+ \n| 5.0 | 5.5 | 1970-01-01T00:00:25 | host2 |\
\n+------------+------------+---------------------+-------+",
);
do_range_select_test(10_000, 5_000, 5_000, Fill::Linear, false, expected).await;
@@ -1339,29 +1402,72 @@ mod test {
ScalarValue::UInt8(None),
ScalarValue::UInt8(Some(9)),
];
- Fill::Null.apply_fill_strategy(&mut test1).unwrap();
+ Fill::Null.apply_fill_strategy(&[], &mut test1).unwrap();
assert_eq!(test1[1], ScalarValue::UInt8(None));
- Fill::Prev.apply_fill_strategy(&mut test1).unwrap();
+ Fill::Prev.apply_fill_strategy(&[], &mut test1).unwrap();
assert_eq!(test1[1], ScalarValue::UInt8(Some(8)));
test1[1] = ScalarValue::UInt8(None);
Fill::Const(ScalarValue::UInt8(Some(10)))
- .apply_fill_strategy(&mut test1)
+ .apply_fill_strategy(&[], &mut test1)
.unwrap();
assert_eq!(test1[1], ScalarValue::UInt8(Some(10)));
- test1[1] = ScalarValue::UInt8(None);
- assert_eq!(
- Fill::Linear
- .apply_fill_strategy(&mut test1)
- .unwrap_err()
- .to_string(),
- "Execution error: RangePlan: Apply Fill LINEAR strategy on Non-floating type"
- );
- let mut test2 = vec![
- ScalarValue::Float32(Some(8.0)),
+ }
+
+ #[test]
+ fn test_fill_linear() {
+ let ts = vec![1, 2, 3, 4, 5];
+ let mut test = vec![
+ ScalarValue::Float32(Some(1.0)),
+ ScalarValue::Float32(None),
+ ScalarValue::Float32(Some(3.0)),
ScalarValue::Float32(None),
- ScalarValue::Float32(Some(9.0)),
+ ScalarValue::Float32(Some(5.0)),
];
- Fill::Linear.apply_fill_strategy(&mut test2).unwrap();
- assert_eq!(test2[1], ScalarValue::Float32(Some(8.5)));
+ Fill::Linear.apply_fill_strategy(&ts, &mut test).unwrap();
+ let mut test1 = vec![
+ ScalarValue::Float32(None),
+ ScalarValue::Float32(Some(2.0)),
+ ScalarValue::Float32(None),
+ ScalarValue::Float32(Some(4.0)),
+ ScalarValue::Float32(None),
+ ];
+ Fill::Linear.apply_fill_strategy(&ts, &mut test1).unwrap();
+ assert_eq!(test, test1);
+ // test linear interpolation on irregularly spaced ts/data
+ let ts = vec![
+ 1, // None
+ 3, // 1.0
+ 8, // 11.0
+ 30, // None
+ 88, // 10.0
+ 108, // 5.0
+ 128, // None
+ ];
+ let mut test = vec![
+ ScalarValue::Float64(None),
+ ScalarValue::Float64(Some(1.0)),
+ ScalarValue::Float64(Some(11.0)),
+ ScalarValue::Float64(None),
+ ScalarValue::Float64(Some(10.0)),
+ ScalarValue::Float64(Some(5.0)),
+ ScalarValue::Float64(None),
+ ];
+ Fill::Linear.apply_fill_strategy(&ts, &mut test).unwrap();
+ let data: Vec<_> = test
+ .into_iter()
+ .map(|x| {
+ let ScalarValue::Float64(Some(f)) = x else {
+ unreachable!()
+ };
+ f
+ })
+ .collect();
+ assert_eq!(data, vec![-3.0, 1.0, 11.0, 10.725, 10.0, 5.0, 0.0]);
+ // test corner case
+ let ts = vec![1];
+ let test = vec![ScalarValue::Float32(None)];
+ let mut test1 = test.clone();
+ Fill::Linear.apply_fill_strategy(&ts, &mut test1).unwrap();
+ assert_eq!(test, test1);
}
}
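A worked check of the interpolation rule introduced above (illustrative only, not part of the commit): `linear_interpolation` computes y = y0 + (y1 - y0) / (x1 - x0) * (t - x0), and applying the same formula by hand reproduces the expected values of the irregularly spaced test case, including the extrapolated head and tail points.

fn lerp(x0: f64, y0: f64, x1: f64, y1: f64, x: f64) -> f64 {
    // Same expression as in `linear_interpolation` above.
    y0 + (y1 - y0) / (x1 - x0) * (x - x0)
}

fn main() {
    // Leading null at ts=1, extrapolated from the first two known points (3, 1.0) and (8, 11.0).
    assert_eq!(lerp(3.0, 1.0, 8.0, 11.0, 1.0), -3.0);
    // Interior null at ts=30, interpolated between (8, 11.0) and (88, 10.0).
    assert!((lerp(8.0, 11.0, 88.0, 10.0, 30.0) - 10.725).abs() < 1e-9);
    // Trailing null at ts=128, extrapolated from the last two known points (88, 10.0) and (108, 5.0).
    assert_eq!(lerp(88.0, 10.0, 108.0, 5.0, 128.0), 0.0);
}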
|
fix
|
use linear interpolation to implement range LINEAR fill strategy (#2903)
|
9ce73e7ca1450a6a245da21a88f3bede08d93a57
|
2023-08-24 18:16:54
|
Zhenchi
|
refactor(frontend): TableScan instead of `scan_to_stream` for `COPY TO` (#2244)
| false
|
diff --git a/src/frontend/src/statement.rs b/src/frontend/src/statement.rs
index 26ee518f689b..3deadcf8649e 100644
--- a/src/frontend/src/statement.rs
+++ b/src/frontend/src/statement.rs
@@ -104,11 +104,12 @@ impl StatementExecutor {
Statement::ShowTables(stmt) => self.show_tables(stmt, query_ctx).await,
Statement::Copy(sql::statements::copy::Copy::CopyTable(stmt)) => {
- let req = to_copy_table_request(stmt, query_ctx)?;
+ let req = to_copy_table_request(stmt, query_ctx.clone())?;
match req.direction {
- CopyDirection::Export => {
- self.copy_table_to(req).await.map(Output::AffectedRows)
- }
+ CopyDirection::Export => self
+ .copy_table_to(req, query_ctx)
+ .await
+ .map(Output::AffectedRows),
CopyDirection::Import => {
self.copy_table_from(req).await.map(Output::AffectedRows)
}
diff --git a/src/frontend/src/statement/backup.rs b/src/frontend/src/statement/backup.rs
index b0004ad5f2bb..7d34d376f59c 100644
--- a/src/frontend/src/statement/backup.rs
+++ b/src/frontend/src/statement/backup.rs
@@ -15,6 +15,7 @@
use common_datasource::file_format::Format;
use common_query::Output;
use common_telemetry::info;
+use session::context::QueryContextBuilder;
use snafu::{ensure, ResultExt};
use table::requests::{CopyDatabaseRequest, CopyDirection, CopyTableRequest};
@@ -65,17 +66,20 @@ impl StatementExecutor {
);
let exported = self
- .copy_table_to(CopyTableRequest {
- catalog_name: req.catalog_name.clone(),
- schema_name: req.schema_name.clone(),
- table_name,
- location: table_file,
- with: req.with.clone(),
- connection: req.connection.clone(),
- pattern: None,
- direction: CopyDirection::Export,
- timestamp_range: req.time_range,
- })
+ .copy_table_to(
+ CopyTableRequest {
+ catalog_name: req.catalog_name.clone(),
+ schema_name: req.schema_name.clone(),
+ table_name,
+ location: table_file,
+ with: req.with.clone(),
+ connection: req.connection.clone(),
+ pattern: None,
+ direction: CopyDirection::Export,
+ timestamp_range: req.time_range,
+ },
+ QueryContextBuilder::default().build(),
+ )
.await?;
exported_rows += exported;
}
diff --git a/src/frontend/src/statement/copy_table_to.rs b/src/frontend/src/statement/copy_table_to.rs
index 5efd934ec5e6..c23d5473f300 100644
--- a/src/frontend/src/statement/copy_table_to.rs
+++ b/src/frontend/src/statement/copy_table_to.rs
@@ -12,22 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
use common_base::readable_size::ReadableSize;
use common_datasource::file_format::csv::stream_to_csv;
use common_datasource::file_format::json::stream_to_json;
use common_datasource::file_format::Format;
use common_datasource::object_store::{build_backend, parse_url};
+use common_query::Output;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use common_recordbatch::SendableRecordBatchStream;
+use datafusion::datasource::DefaultTableSource;
+use datafusion_common::TableReference as DfTableReference;
+use datafusion_expr::LogicalPlanBuilder;
use object_store::ObjectStore;
+use query::plan::LogicalPlan;
+use session::context::QueryContextRef;
use snafu::ResultExt;
use storage::sst::SstInfo;
use storage::{ParquetWriter, Source};
-use store_api::storage::ScanRequest;
use table::engine::TableReference;
use table::requests::CopyTableRequest;
+use table::table::adapter::DfTableProviderAdapter;
-use crate::error::{self, Result, WriteParquetSnafu};
+use crate::error::{
+ self, BuildDfLogicalPlanSnafu, ExecLogicalPlanSnafu, Result, WriteParquetSnafu,
+};
use crate::statement::StatementExecutor;
impl StatementExecutor {
@@ -72,16 +82,18 @@ impl StatementExecutor {
}
}
- pub(crate) async fn copy_table_to(&self, req: CopyTableRequest) -> Result<usize> {
- let table_ref = TableReference {
- catalog: &req.catalog_name,
- schema: &req.schema_name,
- table: &req.table_name,
- };
+ pub(crate) async fn copy_table_to(
+ &self,
+ req: CopyTableRequest,
+ query_ctx: QueryContextRef,
+ ) -> Result<usize> {
+ let table_ref = TableReference::full(&req.catalog_name, &req.schema_name, &req.table_name);
let table = self.get_table(&table_ref).await?;
let format = Format::try_from(&req.with).context(error::ParseFileFormatSnafu)?;
+ let df_table_ref = DfTableReference::from(table_ref);
+
let filters = table
.schema()
.timestamp_column()
@@ -91,20 +103,33 @@ impl StatementExecutor {
req.timestamp_range.as_ref(),
)
})
+ .map(|filter| filter.df_expr().clone())
.into_iter()
.collect::<Vec<_>>();
- let scan_req = ScanRequest {
+ let table_provider = Arc::new(DfTableProviderAdapter::new(table));
+ let table_source = Arc::new(DefaultTableSource::new(table_provider));
+
+ let plan = LogicalPlanBuilder::scan_with_filters(
+ df_table_ref.to_owned_reference(),
+ table_source,
+ None,
filters,
- ..Default::default()
+ )
+ .context(BuildDfLogicalPlanSnafu)?
+ .build()
+ .context(BuildDfLogicalPlanSnafu)?;
+
+ let output = self
+ .query_engine
+ .execute(LogicalPlan::DfPlan(plan), query_ctx)
+ .await
+ .context(ExecLogicalPlanSnafu)?;
+ let stream = match output {
+ Output::Stream(stream) => stream,
+ Output::RecordBatches(record_batches) => record_batches.as_stream(),
+ _ => unreachable!(),
};
- let stream =
- table
- .scan_to_stream(scan_req)
- .await
- .with_context(|_| error::CopyTableSnafu {
- table_name: table_ref.to_string(),
- })?;
let (_schema, _host, path) = parse_url(&req.location).context(error::ParseUrlSnafu)?;
let object_store =
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
index d26dc0b768f6..f4937bfb60a8 100644
--- a/src/table/src/engine.rs
+++ b/src/table/src/engine.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use common_base::paths::DATA_DIR;
use common_procedure::BoxedProcedure;
+use datafusion_common::TableReference as DfTableReference;
use store_api::storage::RegionNumber;
use crate::error::{self, Result};
@@ -63,6 +64,12 @@ impl<'a> Display for TableReference<'a> {
}
}
+impl<'a> From<TableReference<'a>> for DfTableReference<'a> {
+ fn from(val: TableReference<'a>) -> Self {
+ DfTableReference::full(val.catalog, val.schema, val.table)
+ }
+}
+
/// CloseTableResult
///
/// Returns [`CloseTableResult::Released`] and closed region numbers if a table was removed
|
refactor
|
TableScan instead of `scan_to_stream` for `COPY TO` (#2244)
|
6628c41c3663bb22b41fee3dd10f0edd0769802d
|
2024-02-20 08:08:35
|
Zhenchi
|
feat(metric-engine): set index options for data region (#3330)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 939dd99813fe..3cc6ead1d7a2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5210,6 +5210,7 @@ dependencies = [
"common-time",
"datafusion",
"datatypes",
+ "itertools 0.10.5",
"lazy_static",
"mito2",
"mur3",
diff --git a/src/metric-engine/Cargo.toml b/src/metric-engine/Cargo.toml
index def5885cf908..4722fa81e2fc 100644
--- a/src/metric-engine/Cargo.toml
+++ b/src/metric-engine/Cargo.toml
@@ -17,6 +17,7 @@ common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes.workspace = true
+itertools.workspace = true
lazy_static = "1.4"
mito2.workspace = true
mur3 = "0.1"
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 7cf5dc4e266f..1240b7cd6f0b 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -17,6 +17,7 @@ mod close;
mod create;
mod drop;
mod open;
+mod options;
mod put;
mod read;
mod region_metadata;
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs
index 9ba33121c8f3..fbadbf5d2e18 100644
--- a/src/metric-engine/src/engine/create.rs
+++ b/src/metric-engine/src/engine/create.rs
@@ -36,6 +36,7 @@ use store_api::region_request::{AffectedRows, RegionCreateRequest, RegionRequest
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
+use crate::engine::options::set_index_options_for_data_region;
use crate::engine::MetricEngineInner;
use crate::error::{
ConflictRegionOptionSnafu, CreateMitoRegionSnafu, InternalColumnOccupiedSnafu,
@@ -376,6 +377,9 @@ impl MetricEngineInner {
data_region_request.primary_key =
vec![ReservedColumnId::table_id(), ReservedColumnId::tsid()];
+ // set index options
+ set_index_options_for_data_region(&mut data_region_request.options);
+
data_region_request
}
diff --git a/src/metric-engine/src/engine/open.rs b/src/metric-engine/src/engine/open.rs
index df41d1cf12ef..952c923487bf 100644
--- a/src/metric-engine/src/engine/open.rs
+++ b/src/metric-engine/src/engine/open.rs
@@ -26,6 +26,7 @@ use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;
use super::MetricEngineInner;
+use crate::engine::options::set_index_options_for_data_region;
use crate::error::{OpenMitoRegionSnafu, Result};
use crate::metrics::{LOGICAL_REGION_COUNT, PHYSICAL_REGION_COUNT};
use crate::utils;
@@ -77,9 +78,12 @@ impl MetricEngineInner {
engine: MITO_ENGINE_NAME.to_string(),
skip_wal_replay: request.skip_wal_replay,
};
+
+ let mut data_region_options = request.options;
+ set_index_options_for_data_region(&mut data_region_options);
let open_data_region_request = RegionOpenRequest {
region_dir: data_region_dir,
- options: request.options.clone(),
+ options: data_region_options,
engine: MITO_ENGINE_NAME.to_string(),
skip_wal_replay: request.skip_wal_replay,
};
diff --git a/src/metric-engine/src/engine/options.rs b/src/metric-engine/src/engine/options.rs
new file mode 100644
index 000000000000..ee071e8d48e5
--- /dev/null
+++ b/src/metric-engine/src/engine/options.rs
@@ -0,0 +1,44 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Specific options for the metric engine to create or open a region.
+
+use std::collections::HashMap;
+
+use itertools::Itertools as _;
+use store_api::storage::consts::ReservedColumnId;
+use store_api::storage::ColumnId;
+
+/// Skip building an index on the `tsid` column, which is unfriendly to the inverted index
+/// and would occupy excessive space if indexed.
+const IGNORE_COLUMN_IDS_FOR_DATA_REGION: [ColumnId; 1] = [ReservedColumnId::tsid()];
+
+/// An empirical segment row count for the metric data region.
+/// The metric engine's write pattern produces smaller indices than the mito engine's,
+/// so lowering this value from the default of 1024 (while letting individual indices grow
+/// slightly larger) yields a better indexing effect.
+const SEG_ROW_COUNT_FOR_DATA_REGION: u32 = 256;
+
+/// Set the index options for the data region.
+pub fn set_index_options_for_data_region(options: &mut HashMap<String, String>) {
+ options.insert(
+ "index.inverted_index.ignore_column_ids".to_string(),
+ IGNORE_COLUMN_IDS_FOR_DATA_REGION.iter().join(","),
+ );
+
+ options.insert(
+ "index.inverted_index.segment_row_count".to_string(),
+ SEG_ROW_COUNT_FOR_DATA_REGION.to_string(),
+ );
+}
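Illustrative aside (not part of the commit): the `ignore_column_ids` option value above is simply the reserved column ids joined with commas via `Itertools::join`. A reduced standalone version, using hypothetical ids in place of `ReservedColumnId::tsid()`:

use itertools::Itertools as _;

fn main() {
    // Hypothetical reserved column ids; the real value comes from ReservedColumnId::tsid().
    let ignore_column_ids: [u32; 2] = [2147483649, 2147483650];
    let value = ignore_column_ids.iter().join(",");
    assert_eq!(value, "2147483649,2147483650");
}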
|
feat
|
set index options for data region (#3330)
|
01e3a24cf77d748e4e4cfdf4155d6e2e2a3aabd6
|
2024-06-14 22:33:30
|
localhost
|
feat: log ingestion support (#4014)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 13e05ac06c71..3bd96784858f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -339,7 +339,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"chrono",
- "chrono-tz",
+ "chrono-tz 0.8.6",
"half 2.4.1",
"hashbrown 0.14.5",
"num",
@@ -771,6 +771,7 @@ dependencies = [
"matchit",
"memchr",
"mime",
+ "multer",
"percent-encoding",
"pin-project-lite",
"rustversion",
@@ -1388,7 +1389,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d59ae0466b83e838b81a54256c39d5d7c20b9d7daa10510a242d9b75abd5936e"
dependencies = [
"chrono",
- "chrono-tz-build",
+ "chrono-tz-build 0.2.1",
+ "phf",
+]
+
+[[package]]
+name = "chrono-tz"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93698b29de5e97ad0ae26447b344c482a7284c737d9ddc5f9e52b74a336671bb"
+dependencies = [
+ "chrono",
+ "chrono-tz-build 0.3.0",
"phf",
]
@@ -1403,6 +1415,17 @@ dependencies = [
"phf_codegen",
]
+[[package]]
+name = "chrono-tz-build"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c088aee841df9c3041febbb73934cfc39708749bf96dc827e3359cd39ef11b1"
+dependencies = [
+ "parse-zoneinfo",
+ "phf",
+ "phf_codegen",
+]
+
[[package]]
name = "chunked_transfer"
version = "1.5.0"
@@ -2141,7 +2164,7 @@ version = "0.8.2"
dependencies = [
"arrow",
"chrono",
- "chrono-tz",
+ "chrono-tz 0.8.6",
"common-error",
"common-macro",
"once_cell",
@@ -3544,6 +3567,15 @@ version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+[[package]]
+name = "encoding_rs"
+version = "0.8.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59"
+dependencies = [
+ "cfg-if",
+]
+
[[package]]
name = "endian-type"
version = "0.1.2"
@@ -3913,6 +3945,7 @@ dependencies = [
"common-time",
"common-version",
"datanode",
+ "datatypes",
"futures",
"humantime-serde",
"lazy_static",
@@ -3922,12 +3955,14 @@ dependencies = [
"opentelemetry-proto 0.5.0",
"operator",
"partition",
+ "pipeline",
"prometheus",
"prost 0.12.6",
"query",
"raft-engine",
"script",
"serde",
+ "serde_json",
"servers",
"session",
"snafu 0.8.3",
@@ -5941,6 +5976,24 @@ dependencies = [
"rand_core",
]
+[[package]]
+name = "multer"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2"
+dependencies = [
+ "bytes",
+ "encoding_rs",
+ "futures-util",
+ "http 0.2.12",
+ "httparse",
+ "log",
+ "memchr",
+ "mime",
+ "spin 0.9.8",
+ "version_check",
+]
+
[[package]]
name = "multimap"
version = "0.8.3"
@@ -6756,7 +6809,7 @@ dependencies = [
"async-trait",
"bytes",
"chrono",
- "chrono-tz",
+ "chrono-tz 0.8.6",
"datafusion 37.1.0",
"datafusion-expr 37.1.0",
"datafusion-physical-expr 37.1.0",
@@ -7229,6 +7282,58 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+[[package]]
+name = "pipeline"
+version = "0.8.2"
+dependencies = [
+ "api",
+ "arrow",
+ "async-trait",
+ "catalog",
+ "chrono",
+ "chrono-tz 0.9.0",
+ "common-catalog",
+ "common-error",
+ "common-function",
+ "common-macro",
+ "common-meta",
+ "common-query",
+ "common-recordbatch",
+ "common-runtime",
+ "common-telemetry",
+ "common-time",
+ "crossbeam-utils",
+ "csv",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions 38.0.0",
+ "datafusion-physical-expr 38.0.0",
+ "datatypes",
+ "futures",
+ "greptime-proto",
+ "itertools 0.10.5",
+ "lazy_static",
+ "moka",
+ "once_cell",
+ "operator",
+ "paste",
+ "prometheus",
+ "query",
+ "rayon",
+ "regex",
+ "ron",
+ "serde",
+ "serde_json",
+ "session",
+ "snafu 0.8.3",
+ "sql",
+ "table",
+ "tokio",
+ "urlencoding",
+ "yaml-rust",
+]
+
[[package]]
name = "pkcs1"
version = "0.3.3"
@@ -9637,6 +9742,7 @@ dependencies = [
"permutation",
"pgwire",
"pin-project",
+ "pipeline",
"postgres-types",
"pprof",
"prometheus",
diff --git a/Cargo.toml b/Cargo.toml
index 9241fe632ebf..0cb9d9b93999 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -46,6 +46,7 @@ members = [
"src/object-store",
"src/operator",
"src/partition",
+ "src/pipeline",
"src/plugins",
"src/promql",
"src/puffin",
@@ -224,6 +225,7 @@ mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
+pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
diff --git a/src/auth/src/permission.rs b/src/auth/src/permission.rs
index 9a8c2a243de7..57afda471c8b 100644
--- a/src/auth/src/permission.rs
+++ b/src/auth/src/permission.rs
@@ -30,6 +30,7 @@ pub enum PermissionReq<'a> {
PromStoreWrite,
PromStoreRead,
Otlp,
+ LogWrite,
}
#[derive(Debug)]
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 2b8d5c746f60..56f4ab904ae2 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -44,6 +44,7 @@ meta-client.workspace = true
opentelemetry-proto.workspace = true
operator.workspace = true
partition.workspace = true
+pipeline.workspace = true
prometheus.workspace = true
prost.workspace = true
query.workspace = true
@@ -62,11 +63,13 @@ toml.workspace = true
tonic.workspace = true
[dev-dependencies]
-catalog.workspace = true
+catalog = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
datanode.workspace = true
+datatypes.workspace = true
futures = "0.3"
meta-srv = { workspace = true, features = ["mock"] }
+serde_json.workspace = true
strfmt = "0.2"
tower.workspace = true
uuid.workspace = true
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 29c832afe595..ecc04af789b1 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -15,6 +15,7 @@
pub mod builder;
mod grpc;
mod influxdb;
+mod log_handler;
mod opentsdb;
mod otlp;
mod prom_store;
@@ -48,6 +49,7 @@ use meta_client::MetaClientOptions;
use operator::delete::DeleterRef;
use operator::insert::InserterRef;
use operator::statement::StatementExecutor;
+use pipeline::pipeline_operator::PipelineOperator;
use prometheus::HistogramTimer;
use query::metrics::OnDone;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
@@ -66,7 +68,7 @@ use servers::prometheus_handler::PrometheusHandler;
use servers::query_handler::grpc::GrpcQueryHandler;
use servers::query_handler::sql::SqlQueryHandler;
use servers::query_handler::{
- InfluxdbLineProtocolHandler, OpenTelemetryProtocolHandler, OpentsdbProtocolHandler,
+ InfluxdbLineProtocolHandler, LogHandler, OpenTelemetryProtocolHandler, OpentsdbProtocolHandler,
PromStoreProtocolHandler, ScriptHandler,
};
use servers::server::ServerHandlers;
@@ -100,6 +102,7 @@ pub trait FrontendInstance:
+ OpenTelemetryProtocolHandler
+ ScriptHandler
+ PrometheusHandler
+ + LogHandler
+ Send
+ Sync
+ 'static
@@ -108,12 +111,12 @@ pub trait FrontendInstance:
}
pub type FrontendInstanceRef = Arc<dyn FrontendInstance>;
-pub type StatementExecutorRef = Arc<StatementExecutor>;
#[derive(Clone)]
pub struct Instance {
catalog_manager: CatalogManagerRef,
script_executor: Arc<ScriptExecutor>,
+ pipeline_operator: Arc<PipelineOperator>,
statement_executor: Arc<StatementExecutor>,
query_engine: QueryEngineRef,
plugins: Plugins,
diff --git a/src/frontend/src/instance/builder.rs b/src/frontend/src/instance/builder.rs
index f0993458a6aa..ae8d77dd20b5 100644
--- a/src/frontend/src/instance/builder.rs
+++ b/src/frontend/src/instance/builder.rs
@@ -27,9 +27,10 @@ use operator::delete::Deleter;
use operator::insert::Inserter;
use operator::procedure::ProcedureServiceOperator;
use operator::request::Requester;
-use operator::statement::StatementExecutor;
+use operator::statement::{StatementExecutor, StatementExecutorRef};
use operator::table::TableMutationOperator;
use partition::manager::PartitionRuleManager;
+use pipeline::pipeline_operator::PipelineOperator;
use query::QueryEngineFactory;
use servers::server::ServerHandlers;
use snafu::OptionExt;
@@ -37,7 +38,7 @@ use snafu::OptionExt;
use crate::error::{self, Result};
use crate::heartbeat::HeartbeatTask;
use crate::instance::region_query::FrontendRegionQueryHandler;
-use crate::instance::{Instance, StatementExecutorRef};
+use crate::instance::Instance;
use crate::script::ScriptExecutor;
/// The frontend [`Instance`] builder.
@@ -172,11 +173,19 @@ impl FrontendBuilder {
table_route_cache,
));
+ let pipeline_operator = Arc::new(PipelineOperator::new(
+ inserter.clone(),
+ statement_executor.clone(),
+ self.catalog_manager.clone(),
+ query_engine.clone(),
+ ));
+
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
Ok(Instance {
catalog_manager: self.catalog_manager,
script_executor,
+ pipeline_operator,
statement_executor,
query_engine,
plugins,
diff --git a/src/frontend/src/instance/log_handler.rs b/src/frontend/src/instance/log_handler.rs
new file mode 100644
index 000000000000..6ef48205cc56
--- /dev/null
+++ b/src/frontend/src/instance/log_handler.rs
@@ -0,0 +1,93 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use api::v1::RowInsertRequests;
+use async_trait::async_trait;
+use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
+use client::Output;
+use common_error::ext::BoxedError;
+use pipeline::table::PipelineVersion;
+use pipeline::{GreptimeTransformer, Pipeline};
+use servers::error::{
+ AuthSnafu, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult,
+ UnsupportedDeletePipelineSnafu,
+};
+use servers::query_handler::LogHandler;
+use session::context::QueryContextRef;
+use snafu::ResultExt;
+
+use crate::instance::Instance;
+
+#[async_trait]
+impl LogHandler for Instance {
+ async fn insert_logs(
+ &self,
+ log: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> ServerResult<Output> {
+ self.plugins
+ .get::<PermissionCheckerRef>()
+ .as_ref()
+ .check_permission(ctx.current_user(), PermissionReq::LogWrite)
+ .context(AuthSnafu)?;
+
+ self.handle_log_inserts(log, ctx).await
+ }
+
+ async fn get_pipeline(
+ &self,
+ name: &str,
+ version: PipelineVersion,
+ query_ctx: QueryContextRef,
+ ) -> ServerResult<Arc<Pipeline<GreptimeTransformer>>> {
+ self.pipeline_operator
+ .get_pipeline(query_ctx, name, version)
+ .await
+ .context(PipelineSnafu)
+ }
+
+ async fn insert_pipeline(
+ &self,
+ name: &str,
+ content_type: &str,
+ pipeline: &str,
+ query_ctx: QueryContextRef,
+ ) -> ServerResult<()> {
+ self.pipeline_operator
+ .insert_pipeline(name, content_type, pipeline, query_ctx)
+ .await
+ .context(PipelineSnafu)
+ }
+
+ async fn delete_pipeline(&self, _name: &str, _query_ctx: QueryContextRef) -> ServerResult<()> {
+ // TODO(qtang): impl delete
+ Err(UnsupportedDeletePipelineSnafu {}.build())
+ }
+}
+
+impl Instance {
+ pub async fn handle_log_inserts(
+ &self,
+ log: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> ServerResult<Output> {
+ self.inserter
+ .handle_log_inserts(log, ctx, self.statement_executor.as_ref())
+ .await
+ .map_err(BoxedError::new)
+ .context(ExecuteGrpcRequestSnafu)
+ }
+}
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index a72269f863e6..1433a595ce81 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -90,6 +90,8 @@ where
Some(self.instance.clone()),
);
+ builder = builder.with_log_ingest_handler(self.instance.clone());
+
if let Some(user_provider) = self.plugins.get::<UserProviderRef>() {
builder = builder.with_user_provider(user_provider);
}
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 7a04ebdee5d9..4bae21f987b9 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -66,6 +66,22 @@ pub struct Inserter {
pub type InserterRef = Arc<Inserter>;
+enum AutoCreateTableType {
+ Logical(String),
+ Physical,
+ Log,
+}
+
+impl AutoCreateTableType {
+ fn as_str(&self) -> &'static str {
+ match self {
+ AutoCreateTableType::Logical(_) => "logical",
+ AutoCreateTableType::Physical => "physical",
+ AutoCreateTableType::Log => "log",
+ }
+ }
+}
+
impl Inserter {
pub fn new(
catalog_manager: CatalogManagerRef,
@@ -108,7 +124,42 @@ impl Inserter {
validate_column_count_match(&requests)?;
let table_name_to_ids = self
- .create_or_alter_tables_on_demand(&requests, &ctx, None, statement_executor)
+ .create_or_alter_tables_on_demand(
+ &requests,
+ &ctx,
+ AutoCreateTableType::Physical,
+ statement_executor,
+ )
+ .await?;
+ let inserts = RowToRegion::new(table_name_to_ids, self.partition_manager.as_ref())
+ .convert(requests)
+ .await?;
+
+ self.do_request(inserts, &ctx).await
+ }
+
+ pub async fn handle_log_inserts(
+ &self,
+ mut requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ statement_executor: &StatementExecutor,
+ ) -> Result<Output> {
+ // remove empty requests
+ requests.inserts.retain(|req| {
+ req.rows
+ .as_ref()
+ .map(|r| !r.rows.is_empty())
+ .unwrap_or_default()
+ });
+ validate_column_count_match(&requests)?;
+
+ let table_name_to_ids = self
+ .create_or_alter_tables_on_demand(
+ &requests,
+ &ctx,
+ AutoCreateTableType::Log,
+ statement_executor,
+ )
.await?;
let inserts = RowToRegion::new(table_name_to_ids, self.partition_manager.as_ref())
.convert(requests)
@@ -143,7 +194,7 @@ impl Inserter {
.create_or_alter_tables_on_demand(
&requests,
&ctx,
- Some(physical_table.to_string()),
+ AutoCreateTableType::Logical(physical_table.to_string()),
statement_executor,
)
.await?;
@@ -380,12 +431,15 @@ impl Inserter {
&self,
requests: &RowInsertRequests,
ctx: &QueryContextRef,
- on_physical_table: Option<String>,
+ auto_create_table_type: AutoCreateTableType,
statement_executor: &StatementExecutor,
) -> Result<HashMap<String, TableId>> {
let mut table_name_to_ids = HashMap::with_capacity(requests.inserts.len());
let mut create_tables = vec![];
let mut alter_tables = vec![];
+ let _timer = crate::metrics::CREATE_ALTER_ON_DEMAND
+ .with_label_values(&[auto_create_table_type.as_str()])
+ .start_timer();
for req in &requests.inserts {
let catalog = ctx.current_catalog();
let schema = ctx.current_schema();
@@ -407,42 +461,56 @@ impl Inserter {
}
}
- if let Some(on_physical_table) = on_physical_table {
- if !create_tables.is_empty() {
- // Creates logical tables in batch.
- let tables = self
- .create_logical_tables(
- create_tables,
- ctx,
- &on_physical_table,
- statement_executor,
- )
- .await?;
+ match auto_create_table_type {
+ AutoCreateTableType::Logical(on_physical_table) => {
+ if !create_tables.is_empty() {
+ // Creates logical tables in batch.
+ let tables = self
+ .create_logical_tables(
+ create_tables,
+ ctx,
+ &on_physical_table,
+ statement_executor,
+ )
+ .await?;
- for table in tables {
+ for table in tables {
+ let table_info = table.table_info();
+ table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
+ }
+ }
+ if !alter_tables.is_empty() {
+ // Alter logical tables in batch.
+ statement_executor
+ .alter_logical_tables(alter_tables, ctx.clone())
+ .await?;
+ }
+ }
+ AutoCreateTableType::Physical => {
+ for req in create_tables {
+ let table = self.create_table(req, ctx, statement_executor).await?;
let table_info = table.table_info();
table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
}
+ for alter_expr in alter_tables.into_iter() {
+ statement_executor
+ .alter_table_inner(alter_expr, ctx.clone())
+ .await?;
+ }
}
- if !alter_tables.is_empty() {
- // Alter logical tables in batch.
- statement_executor
- .alter_logical_tables(alter_tables, ctx.clone())
- .await?;
- }
- } else {
- for req in create_tables {
- let table = self.create_table(req, ctx, statement_executor).await?;
- let table_info = table.table_info();
- table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
- }
- for alter_expr in alter_tables.into_iter() {
- statement_executor
- .alter_table_inner(alter_expr, ctx.clone())
- .await?;
+ AutoCreateTableType::Log => {
+ for req in create_tables {
+ let table = self.create_log_table(req, ctx, statement_executor).await?;
+ let table_info = table.table_info();
+ table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
+ }
+ for alter_expr in alter_tables.into_iter() {
+ statement_executor
+ .alter_table_inner(alter_expr, ctx.clone())
+ .await?;
+ }
}
}
-
Ok(table_name_to_ids)
}
@@ -568,17 +636,45 @@ impl Inserter {
match res {
Ok(table) => {
- info!(
- "Successfully created table {}.{}.{}",
- table_ref.catalog, table_ref.schema, table_ref.table,
- );
+                info!("Successfully created table {}", table_ref);
Ok(table)
}
Err(err) => {
- error!(
- "Failed to create table {}.{}.{}: {}",
- table_ref.catalog, table_ref.schema, table_ref.table, err
- );
+ error!(err; "Failed to create table {}", table_ref);
+ Err(err)
+ }
+ }
+ }
+
+ async fn create_log_table(
+ &self,
+ req: &RowInsertRequest,
+ ctx: &QueryContextRef,
+ statement_executor: &StatementExecutor,
+ ) -> Result<TableRef> {
+ let table_ref =
+ TableReference::full(ctx.current_catalog(), ctx.current_schema(), &req.table_name);
+ // SAFETY: `req.rows` is guaranteed to be `Some` by `handle_log_inserts`.
+ let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
+ let create_table_expr = &mut build_create_table_expr(&table_ref, request_schema)?;
+
+ info!("Table `{table_ref}` does not exist, try creating the log table");
+        // Set append_mode to true for log tables,
+        // because log tables should keep rows with the same ts and tags.
+ create_table_expr
+ .table_options
+ .insert("append_mode".to_string(), "true".to_string());
+ let res = statement_executor
+ .create_table_inner(create_table_expr, None, ctx.clone())
+ .await;
+
+ match res {
+ Ok(table) => {
+ info!("Successfully created a log table {}", table_ref);
+ Ok(table)
+ }
+ Err(err) => {
+ error!(err; "Failed to create a log table {}", table_ref);
Err(err)
}
}
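Illustrative aside (not part of the diff above): the retain predicate in `handle_log_inserts` drops requests whose `rows` are `None` or empty before any table lookup happens. Reduced to simplified stand-in types (the real `RowInsertRequest` comes from the api crate), it behaves like this:

struct Rows {
    rows: Vec<u32>,
}
struct Req {
    rows: Option<Rows>,
}

fn main() {
    let mut reqs = vec![
        Req { rows: None },
        Req { rows: Some(Rows { rows: vec![] }) },
        Req { rows: Some(Rows { rows: vec![1] }) },
    ];
    // `None` and empty row sets are dropped; only non-empty requests survive.
    reqs.retain(|req| req.rows.as_ref().map(|r| !r.rows.is_empty()).unwrap_or_default());
    assert_eq!(reqs.len(), 1);
}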
diff --git a/src/operator/src/metrics.rs b/src/operator/src/metrics.rs
index 97c5e0015a55..9a77f9844d38 100644
--- a/src/operator/src/metrics.rs
+++ b/src/operator/src/metrics.rs
@@ -51,4 +51,10 @@ lazy_static! {
"DDL operator create view"
)
.unwrap();
+ pub static ref CREATE_ALTER_ON_DEMAND: HistogramVec = register_histogram_vec!(
+ "greptime_table_operator_create_alter_on_demand",
+ "table operator duration to create or alter tables on demand",
+ &["table_type"]
+ )
+ .unwrap();
}
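Illustrative aside (not part of the diff above): `CREATE_ALTER_ON_DEMAND` is a labelled histogram, so each `AutoCreateTableType` ("log", "physical", "logical") gets its own time series. A minimal sketch of the same prometheus pattern, with a hypothetical metric name:

use lazy_static::lazy_static;
use prometheus::{register_histogram_vec, HistogramVec};

lazy_static! {
    static ref CREATE_ALTER_ON_DEMAND_SKETCH: HistogramVec = register_histogram_vec!(
        "demo_create_alter_on_demand",
        "duration to create or alter tables on demand",
        &["table_type"]
    )
    .unwrap();
}

fn main() {
    // One timer per label value; the duration is observed when the guard drops.
    let _timer = CREATE_ALTER_ON_DEMAND_SKETCH
        .with_label_values(&["log"])
        .start_timer();
}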
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 8522b5db9bb0..6c1c33a0c809 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -73,6 +73,8 @@ pub struct StatementExecutor {
inserter: InserterRef,
}
+pub type StatementExecutorRef = Arc<StatementExecutor>;
+
impl StatementExecutor {
pub fn new(
catalog_manager: CatalogManagerRef,
diff --git a/src/pipeline/Cargo.toml b/src/pipeline/Cargo.toml
new file mode 100644
index 000000000000..03096b47a7a1
--- /dev/null
+++ b/src/pipeline/Cargo.toml
@@ -0,0 +1,62 @@
+[package]
+name = "pipeline"
+edition.workspace = true
+version.workspace = true
+license.workspace = true
+
+[features]
+
+[lints]
+workspace = true
+
+[dependencies]
+api.workspace = true
+arrow.workspace = true
+async-trait.workspace = true
+catalog.workspace = true
+chrono.workspace = true
+chrono-tz = "0.9.0"
+common-catalog.workspace = true
+common-error.workspace = true
+common-function.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
+crossbeam-utils.workspace = true
+csv = "1.3.0"
+datafusion.workspace = true
+datafusion-common.workspace = true
+datafusion-expr.workspace = true
+datafusion-functions.workspace = true
+datafusion-physical-expr.workspace = true
+datatypes.workspace = true
+futures.workspace = true
+greptime-proto.workspace = true
+itertools.workspace = true
+lazy_static.workspace = true
+moka = { workspace = true, features = ["sync"] }
+once_cell.workspace = true
+operator.workspace = true
+paste.workspace = true
+prometheus.workspace = true
+query.workspace = true
+regex.workspace = true
+serde_json.workspace = true
+session.workspace = true
+snafu.workspace = true
+sql.workspace = true
+table.workspace = true
+tokio.workspace = true
+urlencoding = "2.1"
+yaml-rust = "0.4"
+
+[dev-dependencies]
+catalog = { workspace = true, features = ["testing"] }
+rayon = "1.0"
+ron = "0.7"
+serde = { version = "1.0", features = ["derive"] }
+session = { workspace = true, features = ["testing"] }
diff --git a/src/pipeline/src/lib.rs b/src/pipeline/src/lib.rs
new file mode 100644
index 000000000000..86ed9c7ea79b
--- /dev/null
+++ b/src/pipeline/src/lib.rs
@@ -0,0 +1,21 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod etl;
+mod manager;
+
+pub use etl::transform::GreptimeTransformer;
+pub use etl::value::Value;
+pub use etl::{parse, Content, Pipeline};
+pub use manager::{error, pipeline_operator, table};
diff --git a/src/pipeline/src/manager/error.rs b/src/pipeline/src/manager/error.rs
new file mode 100644
index 000000000000..ad5d8a96bebd
--- /dev/null
+++ b/src/pipeline/src/manager/error.rs
@@ -0,0 +1,129 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
+use common_macro::stack_trace_debug;
+use datatypes::timestamp::TimestampNanosecond;
+use snafu::{Location, Snafu};
+
+#[derive(Snafu)]
+#[snafu(visibility(pub))]
+#[stack_trace_debug]
+pub enum Error {
+ #[snafu(display("Pipeline table not found"))]
+ PipelineTableNotFound {
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to insert pipeline to pipelines table"))]
+ InsertPipeline {
+ source: operator::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to parse pipeline: {}", reason))]
+ CompilePipeline {
+ reason: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Pipeline not found, name: {}, version: {}", name, version.map(|ts| ts.0.to_iso8601_string()).unwrap_or("latest".to_string())))]
+ PipelineNotFound {
+ name: String,
+ version: Option<TimestampNanosecond>,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to collect record batch"))]
+ CollectRecords {
+ #[snafu(implicit)]
+ location: Location,
+ source: common_recordbatch::error::Error,
+ },
+
+ #[snafu(display("Failed to cast type, msg: {}", msg))]
+ CastType {
+ msg: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to build DataFusion logical plan"))]
+ BuildDfLogicalPlan {
+ #[snafu(source)]
+ error: datafusion_common::DataFusionError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to execute internal statement"))]
+ ExecuteInternalStatement {
+ source: query::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("General catalog error"))]
+ Catalog {
+ source: catalog::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to create table"))]
+ CreateTable {
+ source: operator::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to execute pipeline, reason: {}", reason))]
+ PipelineTransform {
+ reason: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+impl ErrorExt for Error {
+ fn status_code(&self) -> StatusCode {
+ use Error::*;
+ match self {
+ CastType { .. } => StatusCode::Unexpected,
+ PipelineTableNotFound { .. } => StatusCode::TableNotFound,
+ InsertPipeline { source, .. } => source.status_code(),
+ CollectRecords { source, .. } => source.status_code(),
+ PipelineNotFound { .. } | CompilePipeline { .. } | PipelineTransform { .. } => {
+ StatusCode::InvalidArguments
+ }
+ BuildDfLogicalPlan { .. } => StatusCode::Internal,
+ ExecuteInternalStatement { source, .. } => source.status_code(),
+ Catalog { source, .. } => source.status_code(),
+ CreateTable { source, .. } => source.status_code(),
+ }
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
diff --git a/src/pipeline/src/manager/mod.rs b/src/pipeline/src/manager/mod.rs
new file mode 100644
index 000000000000..95ffb5822ec3
--- /dev/null
+++ b/src/pipeline/src/manager/mod.rs
@@ -0,0 +1,17 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod error;
+pub mod pipeline_operator;
+pub mod table;
diff --git a/src/pipeline/src/manager/pipeline_operator.rs b/src/pipeline/src/manager/pipeline_operator.rs
new file mode 100644
index 000000000000..390a48d834a4
--- /dev/null
+++ b/src/pipeline/src/manager/pipeline_operator.rs
@@ -0,0 +1,211 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::{Arc, RwLock};
+
+use api::v1::CreateTableExpr;
+use catalog::{CatalogManagerRef, RegisterSystemTableRequest};
+use common_catalog::consts::{default_engine, DEFAULT_PRIVATE_SCHEMA_NAME};
+use common_telemetry::info;
+use operator::insert::InserterRef;
+use operator::statement::StatementExecutorRef;
+use query::QueryEngineRef;
+use session::context::QueryContextRef;
+use snafu::{OptionExt, ResultExt};
+use table::TableRef;
+
+use crate::error::{CatalogSnafu, CreateTableSnafu, PipelineTableNotFoundSnafu, Result};
+use crate::table::{PipelineTable, PipelineTableRef, PipelineVersion};
+use crate::{GreptimeTransformer, Pipeline};
+
+pub const PIPELINE_TABLE_NAME: &str = "pipelines";
+
+/// PipelineOperator is responsible for managing pipelines.
+/// It provides the ability to:
+/// - Create a pipeline table if it does not exist
+/// - Get a pipeline from the pipeline table
+/// - Insert a pipeline into the pipeline table
+/// - Compile a pipeline
+/// - Add a pipeline table to the cache
+/// - Get a pipeline table from the cache
+pub struct PipelineOperator {
+ inserter: InserterRef,
+ statement_executor: StatementExecutorRef,
+ catalog_manager: CatalogManagerRef,
+ query_engine: QueryEngineRef,
+ tables: RwLock<HashMap<String, PipelineTableRef>>,
+}
+
+impl PipelineOperator {
+ /// Create a table request for the pipeline table.
+ pub fn create_table_request(&self, catalog: &str) -> RegisterSystemTableRequest {
+ let (time_index, primary_keys, column_defs) = PipelineTable::build_pipeline_schema();
+
+ let create_table_expr = CreateTableExpr {
+ catalog_name: catalog.to_string(),
+ schema_name: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
+ table_name: PIPELINE_TABLE_NAME.to_string(),
+ desc: "GreptimeDB pipeline table for Log".to_string(),
+ column_defs,
+ time_index,
+ primary_keys,
+ create_if_not_exists: true,
+ table_options: Default::default(),
+ table_id: None, // Should and will be assigned by Meta.
+ engine: default_engine().to_string(),
+ };
+
+ RegisterSystemTableRequest {
+ create_table_expr,
+ open_hook: None,
+ }
+ }
+
+ fn add_pipeline_table_to_cache(&self, catalog: &str, table: TableRef) {
+ let mut tables = self.tables.write().unwrap();
+ if tables.contains_key(catalog) {
+ return;
+ }
+ tables.insert(
+ catalog.to_string(),
+ Arc::new(PipelineTable::new(
+ self.inserter.clone(),
+ self.statement_executor.clone(),
+ table,
+ self.query_engine.clone(),
+ )),
+ );
+ }
+
+ async fn create_pipeline_table_if_not_exists(&self, ctx: QueryContextRef) -> Result<()> {
+ let catalog = ctx.current_catalog();
+
+ // exist in cache
+ if self.get_pipeline_table_from_cache(catalog).is_some() {
+ return Ok(());
+ }
+
+ let RegisterSystemTableRequest {
+ create_table_expr: mut expr,
+ open_hook: _,
+ } = self.create_table_request(catalog);
+
+        // exists in catalog, just open it
+ if let Some(table) = self
+ .catalog_manager
+ .table(&expr.catalog_name, &expr.schema_name, &expr.table_name)
+ .await
+ .context(CatalogSnafu)?
+ {
+ self.add_pipeline_table_to_cache(catalog, table);
+ return Ok(());
+ }
+
+ // create table
+ self.statement_executor
+ .create_table_inner(&mut expr, None, ctx.clone())
+ .await
+ .context(CreateTableSnafu)?;
+
+ let schema = &expr.schema_name;
+ let table_name = &expr.table_name;
+
+ // get from catalog
+ let table = self
+ .catalog_manager
+ .table(catalog, schema, table_name)
+ .await
+ .context(CatalogSnafu)?
+ .context(PipelineTableNotFoundSnafu)?;
+
+ info!(
+ "Created pipelines table {} with table id {}.",
+ table.table_info().full_table_name(),
+ table.table_info().table_id()
+ );
+
+ // put to cache
+ self.add_pipeline_table_to_cache(catalog, table);
+
+ Ok(())
+ }
+
+ /// Get a pipeline table from the cache.
+ pub fn get_pipeline_table_from_cache(&self, catalog: &str) -> Option<PipelineTableRef> {
+ self.tables.read().unwrap().get(catalog).cloned()
+ }
+
+ async fn insert_and_compile(
+ &self,
+ ctx: QueryContextRef,
+ name: &str,
+ content_type: &str,
+ pipeline: &str,
+ ) -> Result<Arc<Pipeline<GreptimeTransformer>>> {
+ self.get_pipeline_table_from_cache(ctx.current_catalog())
+ .context(PipelineTableNotFoundSnafu)?
+ .insert_and_compile(ctx.current_schema(), name, content_type, pipeline)
+ .await
+ }
+}
+
+impl PipelineOperator {
+ /// Create a new PipelineOperator.
+ pub fn new(
+ inserter: InserterRef,
+ statement_executor: StatementExecutorRef,
+ catalog_manager: CatalogManagerRef,
+ query_engine: QueryEngineRef,
+ ) -> Self {
+ Self {
+ inserter,
+ statement_executor,
+ catalog_manager,
+ tables: RwLock::new(HashMap::new()),
+ query_engine,
+ }
+ }
+
+ /// Get a pipeline from the pipeline table.
+ pub async fn get_pipeline(
+ &self,
+ query_ctx: QueryContextRef,
+ name: &str,
+ version: PipelineVersion,
+ ) -> Result<Arc<Pipeline<GreptimeTransformer>>> {
+ self.create_pipeline_table_if_not_exists(query_ctx.clone())
+ .await?;
+ self.get_pipeline_table_from_cache(query_ctx.current_catalog())
+ .context(PipelineTableNotFoundSnafu)?
+ .get_pipeline(query_ctx.current_schema(), name, version)
+ .await
+ }
+
+ /// Insert a pipeline into the pipeline table.
+ pub async fn insert_pipeline(
+ &self,
+ name: &str,
+ content_type: &str,
+ pipeline: &str,
+ query_ctx: QueryContextRef,
+ ) -> Result<()> {
+ self.create_pipeline_table_if_not_exists(query_ctx.clone())
+ .await?;
+
+ self.insert_and_compile(query_ctx, name, content_type, pipeline)
+ .await
+ .map(|_| ())
+ }
+}
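
A minimal usage sketch (not part of this commit) of the PipelineOperator above, written as if it lived
inside the pipeline crate. The function name, the "nginx_access" identifier, the module path of
PipelineOperator and the caller-supplied YAML body are assumptions for illustration only.

use catalog::CatalogManagerRef;
use operator::insert::InserterRef;
use operator::statement::StatementExecutorRef;
use query::QueryEngineRef;
use session::context::QueryContextRef;

use crate::error::Result;
use crate::manager::pipeline_operator::PipelineOperator;

// `pipeline_yaml` must be a valid pipeline definition supplied by the caller.
async fn register_and_fetch(
    inserter: InserterRef,
    statement_executor: StatementExecutorRef,
    catalog_manager: CatalogManagerRef,
    query_engine: QueryEngineRef,
    ctx: QueryContextRef,
    pipeline_yaml: &str,
) -> Result<()> {
    let operator =
        PipelineOperator::new(inserter, statement_executor, catalog_manager, query_engine);

    // The first call lazily creates the `pipelines` table under the private schema.
    operator
        .insert_pipeline("nginx_access", "yaml", pipeline_yaml, ctx.clone())
        .await?;

    // `None` asks for the latest version; a cache hit skips the table lookup entirely.
    let _compiled = operator.get_pipeline(ctx, "nginx_access", None).await?;
    Ok(())
}
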
diff --git a/src/pipeline/src/manager/table.rs b/src/pipeline/src/manager/table.rs
new file mode 100644
index 000000000000..d037ae3d4832
--- /dev/null
+++ b/src/pipeline/src/manager/table.rs
@@ -0,0 +1,444 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use api::v1::value::ValueData;
+use api::v1::{
+ ColumnDataType, ColumnDef, ColumnSchema as PbColumnSchema, Row, RowInsertRequest,
+ RowInsertRequests, Rows, SemanticType,
+};
+use common_query::OutputData;
+use common_recordbatch::util as record_util;
+use common_telemetry::{debug, info};
+use common_time::timestamp::{TimeUnit, Timestamp};
+use datafusion::datasource::DefaultTableSource;
+use datafusion::logical_expr::{and, col, lit};
+use datafusion_common::TableReference;
+use datafusion_expr::LogicalPlanBuilder;
+use datatypes::prelude::ScalarVector;
+use datatypes::timestamp::TimestampNanosecond;
+use datatypes::vectors::{StringVector, TimestampNanosecondVector, Vector};
+use moka::sync::Cache;
+use operator::insert::InserterRef;
+use operator::statement::StatementExecutorRef;
+use query::plan::LogicalPlan;
+use query::QueryEngineRef;
+use session::context::{QueryContextBuilder, QueryContextRef};
+use snafu::{ensure, OptionExt, ResultExt};
+use table::metadata::TableInfo;
+use table::table::adapter::DfTableProviderAdapter;
+use table::TableRef;
+
+use crate::error::{
+ BuildDfLogicalPlanSnafu, CastTypeSnafu, CollectRecordsSnafu, CompilePipelineSnafu,
+ ExecuteInternalStatementSnafu, InsertPipelineSnafu, PipelineNotFoundSnafu, Result,
+};
+use crate::etl::transform::GreptimeTransformer;
+use crate::etl::{parse, Content, Pipeline};
+
+/// Pipeline version. An optional timestamp with nanosecond precision.
+/// If the version is None, it means the latest version of the pipeline.
+/// Users can specify a version by providing a timestamp string in ISO 8601 format.
+/// When used in a cache key, it is converted to an i64 representing the number of nanoseconds since the epoch.
+pub type PipelineVersion = Option<TimestampNanosecond>;
+
+pub type PipelineTableRef = Arc<PipelineTable>;
+
+pub const PIPELINE_TABLE_NAME: &str = "pipelines";
+
+pub const PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME: &str = "name";
+pub const PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME: &str = "schema";
+pub const PIPELINE_TABLE_PIPELINE_CONTENT_TYPE_COLUMN_NAME: &str = "content_type";
+pub const PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME: &str = "pipeline";
+pub const PIPELINE_TABLE_CREATED_AT_COLUMN_NAME: &str = "created_at";
+
+/// Pipeline table cache size.
+pub const PIPELINES_CACHE_SIZE: u64 = 10000;
+/// Pipeline table cache time to live.
+pub const PIPELINES_CACHE_TTL: Duration = Duration::from_secs(10);
+
+/// PipelineTable is a table that stores the pipeline schema and content.
+/// Every catalog has its own pipeline table.
+pub struct PipelineTable {
+ inserter: InserterRef,
+ statement_executor: StatementExecutorRef,
+ table: TableRef,
+ query_engine: QueryEngineRef,
+ pipelines: Cache<String, Arc<Pipeline<GreptimeTransformer>>>,
+}
+
+impl PipelineTable {
+ /// Create a new PipelineTable.
+ pub fn new(
+ inserter: InserterRef,
+ statement_executor: StatementExecutorRef,
+ table: TableRef,
+ query_engine: QueryEngineRef,
+ ) -> Self {
+ Self {
+ inserter,
+ statement_executor,
+ table,
+ query_engine,
+ pipelines: Cache::builder()
+ .max_capacity(PIPELINES_CACHE_SIZE)
+ .time_to_live(PIPELINES_CACHE_TTL)
+ .build(),
+ }
+ }
+
+ /// Build the schema for the pipeline table.
+ /// Returns the (time index, primary keys, column) definitions.
+ pub fn build_pipeline_schema() -> (String, Vec<String>, Vec<ColumnDef>) {
+ (
+ PIPELINE_TABLE_CREATED_AT_COLUMN_NAME.to_string(),
+ vec![
+ PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME.to_string(),
+ PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME.to_string(),
+ PIPELINE_TABLE_PIPELINE_CONTENT_TYPE_COLUMN_NAME.to_string(),
+ ],
+ vec![
+ ColumnDef {
+ name: PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME.to_string(),
+ data_type: ColumnDataType::String as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Tag as i32,
+ comment: "".to_string(),
+ datatype_extension: None,
+ },
+ ColumnDef {
+ name: PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME.to_string(),
+ data_type: ColumnDataType::String as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Tag as i32,
+ comment: "".to_string(),
+ datatype_extension: None,
+ },
+ ColumnDef {
+ name: PIPELINE_TABLE_PIPELINE_CONTENT_TYPE_COLUMN_NAME.to_string(),
+ data_type: ColumnDataType::String as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Tag as i32,
+ comment: "".to_string(),
+ datatype_extension: None,
+ },
+ ColumnDef {
+ name: PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME.to_string(),
+ data_type: ColumnDataType::String as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Field as i32,
+ comment: "".to_string(),
+ datatype_extension: None,
+ },
+ ColumnDef {
+ name: PIPELINE_TABLE_CREATED_AT_COLUMN_NAME.to_string(),
+ data_type: ColumnDataType::TimestampNanosecond as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Timestamp as i32,
+ comment: "".to_string(),
+ datatype_extension: None,
+ },
+ ],
+ )
+ }
+
+ /// Build the column schemas for inserting a row into the pipeline table.
+ fn build_insert_column_schemas() -> Vec<PbColumnSchema> {
+ vec![
+ PbColumnSchema {
+ column_name: PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::String.into(),
+ semantic_type: SemanticType::Tag.into(),
+ ..Default::default()
+ },
+ PbColumnSchema {
+ column_name: PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::String.into(),
+ semantic_type: SemanticType::Tag.into(),
+ ..Default::default()
+ },
+ PbColumnSchema {
+ column_name: PIPELINE_TABLE_PIPELINE_CONTENT_TYPE_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::String.into(),
+ semantic_type: SemanticType::Tag.into(),
+ ..Default::default()
+ },
+ PbColumnSchema {
+ column_name: PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::String.into(),
+ semantic_type: SemanticType::Field.into(),
+ ..Default::default()
+ },
+ PbColumnSchema {
+ column_name: PIPELINE_TABLE_CREATED_AT_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::TimestampNanosecond.into(),
+ semantic_type: SemanticType::Timestamp.into(),
+ ..Default::default()
+ },
+ ]
+ }
+
+ fn query_ctx(table_info: &TableInfo) -> QueryContextRef {
+ QueryContextBuilder::default()
+ .current_catalog(table_info.catalog_name.to_string())
+ .current_schema(table_info.schema_name.to_string())
+ .build()
+ .into()
+ }
+
+ /// Compile a pipeline from a string.
+ pub fn compile_pipeline(pipeline: &str) -> Result<Pipeline<GreptimeTransformer>> {
+ let yaml_content = Content::Yaml(pipeline.into());
+ parse::<GreptimeTransformer>(&yaml_content)
+ .map_err(|e| CompilePipelineSnafu { reason: e }.build())
+ }
+
+ fn generate_pipeline_cache_key(schema: &str, name: &str, version: PipelineVersion) -> String {
+ match version {
+ Some(version) => format!("{}/{}/{}", schema, name, i64::from(version)),
+ None => format!("{}/{}/latest", schema, name),
+ }
+ }
+
+ fn get_compiled_pipeline_from_cache(
+ &self,
+ schema: &str,
+ name: &str,
+ version: PipelineVersion,
+ ) -> Option<Arc<Pipeline<GreptimeTransformer>>> {
+ self.pipelines
+ .get(&Self::generate_pipeline_cache_key(schema, name, version))
+ }
+
+ /// Insert a pipeline into the pipeline table.
+ async fn insert_pipeline_to_pipeline_table(
+ &self,
+ schema: &str,
+ name: &str,
+ content_type: &str,
+ pipeline: &str,
+ ) -> Result<Timestamp> {
+ let now = Timestamp::current_time(TimeUnit::Nanosecond);
+
+ let table_info = self.table.table_info();
+
+ let insert = RowInsertRequest {
+ table_name: PIPELINE_TABLE_NAME.to_string(),
+ rows: Some(Rows {
+ schema: Self::build_insert_column_schemas(),
+ rows: vec![Row {
+ values: vec![
+ ValueData::StringValue(name.to_string()).into(),
+ ValueData::StringValue(schema.to_string()).into(),
+ ValueData::StringValue(content_type.to_string()).into(),
+ ValueData::StringValue(pipeline.to_string()).into(),
+ ValueData::TimestampNanosecondValue(now.value()).into(),
+ ],
+ }],
+ }),
+ };
+
+ let requests = RowInsertRequests {
+ inserts: vec![insert],
+ };
+
+ let output = self
+ .inserter
+ .handle_row_inserts(
+ requests,
+ Self::query_ctx(&table_info),
+ &self.statement_executor,
+ )
+ .await
+ .context(InsertPipelineSnafu)?;
+
+ info!(
+ "Inserted pipeline: {} into {} table: {}, output: {:?}.",
+ name,
+ PIPELINE_TABLE_NAME,
+ table_info.full_table_name(),
+ output
+ );
+
+ Ok(now)
+ }
+
+ /// Get a pipeline by name.
+    /// If the pipeline is not in the cache, it will be fetched from the table, compiled, and inserted into the cache.
+ pub async fn get_pipeline(
+ &self,
+ schema: &str,
+ name: &str,
+ version: PipelineVersion,
+ ) -> Result<Arc<Pipeline<GreptimeTransformer>>> {
+ if let Some(pipeline) = self.get_compiled_pipeline_from_cache(schema, name, version) {
+ return Ok(pipeline);
+ }
+
+ let pipeline = self.find_pipeline_by_name(schema, name, version).await?;
+ let compiled_pipeline = Arc::new(Self::compile_pipeline(&pipeline.0)?);
+
+ self.pipelines.insert(
+ Self::generate_pipeline_cache_key(schema, name, version),
+ compiled_pipeline.clone(),
+ );
+ Ok(compiled_pipeline)
+ }
+
+ /// Insert a pipeline into the pipeline table and compile it.
+ /// The compiled pipeline will be inserted into the cache.
+ pub async fn insert_and_compile(
+ &self,
+ schema: &str,
+ name: &str,
+ content_type: &str,
+ pipeline: &str,
+ ) -> Result<Arc<Pipeline<GreptimeTransformer>>> {
+ let compiled_pipeline = Arc::new(Self::compile_pipeline(pipeline)?);
+ // we will use the version in the future
+ let version = self
+ .insert_pipeline_to_pipeline_table(schema, name, content_type, pipeline)
+ .await?;
+
+ {
+ self.pipelines.insert(
+ Self::generate_pipeline_cache_key(schema, name, None),
+ compiled_pipeline.clone(),
+ );
+ self.pipelines.insert(
+ Self::generate_pipeline_cache_key(schema, name, Some(TimestampNanosecond(version))),
+ compiled_pipeline.clone(),
+ );
+ }
+
+ Ok(compiled_pipeline)
+ }
+
+ async fn find_pipeline_by_name(
+ &self,
+ schema: &str,
+ name: &str,
+ version: PipelineVersion,
+ ) -> Result<(String, TimestampNanosecond)> {
+ let table_info = self.table.table_info();
+
+ let table_name = TableReference::full(
+ table_info.catalog_name.clone(),
+ table_info.schema_name.clone(),
+ table_info.name.clone(),
+ );
+
+ let table_provider = Arc::new(DfTableProviderAdapter::new(self.table.clone()));
+ let table_source = Arc::new(DefaultTableSource::new(table_provider));
+ let schema_and_name_filter = and(
+ col(PIPELINE_TABLE_PIPELINE_SCHEMA_COLUMN_NAME).eq(lit(schema)),
+ col(PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME).eq(lit(name)),
+ );
+ let filter = if let Some(v) = version {
+ and(
+ schema_and_name_filter,
+ col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME).eq(lit(v.0.to_iso8601_string())),
+ )
+ } else {
+ schema_and_name_filter
+ };
+
+ let plan = LogicalPlanBuilder::scan(table_name, table_source, None)
+ .context(BuildDfLogicalPlanSnafu)?
+ .filter(filter)
+ .context(BuildDfLogicalPlanSnafu)?
+ .project(vec![
+ col(PIPELINE_TABLE_PIPELINE_CONTENT_COLUMN_NAME),
+ col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME),
+ ])
+ .context(BuildDfLogicalPlanSnafu)?
+ .sort(vec![
+ col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME).sort(false, true)
+ ])
+ .context(BuildDfLogicalPlanSnafu)?
+ .limit(0, Some(1))
+ .context(BuildDfLogicalPlanSnafu)?
+ .build()
+ .context(BuildDfLogicalPlanSnafu)?;
+
+ debug!("find_pipeline_by_name: plan: {:?}", plan);
+
+ let output = self
+ .query_engine
+ .execute(LogicalPlan::DfPlan(plan), Self::query_ctx(&table_info))
+ .await
+ .context(ExecuteInternalStatementSnafu)?;
+ let stream = match output.data {
+ OutputData::Stream(stream) => stream,
+ OutputData::RecordBatches(record_batches) => record_batches.as_stream(),
+ _ => unreachable!(),
+ };
+
+ let records = record_util::collect(stream)
+ .await
+ .context(CollectRecordsSnafu)?;
+
+ ensure!(!records.is_empty(), PipelineNotFoundSnafu { name, version });
+
+ ensure!(
+ records.len() == 1 && records[0].num_columns() == 2,
+ PipelineNotFoundSnafu { name, version }
+ );
+
+ let pipeline_content_column = records[0].column(0);
+ let pipeline_content = pipeline_content_column
+ .as_any()
+ .downcast_ref::<StringVector>()
+ .with_context(|| CastTypeSnafu {
+ msg: format!(
+ "can't downcast {:?} array into string vector",
+ pipeline_content_column.data_type()
+ ),
+ })?;
+
+ let pipeline_created_at_column = records[0].column(1);
+ let pipeline_created_at = pipeline_created_at_column
+ .as_any()
+ .downcast_ref::<TimestampNanosecondVector>()
+ .with_context(|| CastTypeSnafu {
+ msg: format!(
+ "can't downcast {:?} array into scalar vector",
+ pipeline_created_at_column.data_type()
+ ),
+ })?;
+
+ debug!(
+ "find_pipeline_by_name: pipeline_content: {:?}, pipeline_created_at: {:?}",
+ pipeline_content, pipeline_created_at
+ );
+
+ ensure!(
+ pipeline_content.len() == 1,
+ PipelineNotFoundSnafu { name, version }
+ );
+
+ // Safety: asserted above
+ Ok((
+ pipeline_content.get_data(0).unwrap().to_string(),
+ pipeline_created_at.get_data(0).unwrap(),
+ ))
+ }
+}
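
The cache-key scheme used by `generate_pipeline_cache_key` above can be illustrated with a tiny
standalone mirror of its logic (the schema, name and timestamp values below are made up). This is also
why `insert_and_compile` stores a freshly compiled pipeline under both the versioned key and the
"latest" key.

fn cache_key(schema: &str, name: &str, version_nanos: Option<i64>) -> String {
    match version_nanos {
        // A pinned version is keyed by its nanosecond timestamp...
        Some(v) => format!("{}/{}/{}", schema, name, v),
        // ...while an unversioned lookup always maps to the "latest" slot.
        None => format!("{}/{}/latest", schema, name),
    }
}

fn main() {
    assert_eq!(cache_key("public", "nginx", None), "public/nginx/latest");
    assert_eq!(
        cache_key("public", "nginx", Some(1_700_000_000_000_000_000)),
        "public/nginx/1700000000000000000"
    );
}
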
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index b30426d2e7ac..755d59bfacd6 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -22,7 +22,7 @@ arrow-ipc.workspace = true
arrow-schema.workspace = true
async-trait = "0.1"
auth.workspace = true
-axum.workspace = true
+axum = { workspace = true, features = ["multipart"] }
axum-macros = "0.3.8"
base64.workspace = true
bytes.workspace = true
@@ -69,6 +69,7 @@ opentelemetry-proto.workspace = true
parking_lot = "0.12"
pgwire = "0.20"
pin-project = "1.0"
+pipeline.workspace = true
postgres-types = { version = "0.2", features = ["with-chrono-0_4"] }
pprof = { version = "0.13", features = [
"flamegraph",
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index ae595b8e95b6..04b6fa196ca2 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -27,6 +27,7 @@ use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_telemetry::{debug, error};
use datatypes::prelude::ConcreteDataType;
+use headers::ContentType;
use query::parser::PromQuery;
use serde_json::json;
use snafu::{Location, Snafu};
@@ -148,6 +149,19 @@ pub enum Error {
source: BoxedError,
},
+ #[snafu(display("Pipeline management api error"))]
+ Pipeline {
+ source: pipeline::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+    #[snafu(display("Deleting pipelines is not supported"))]
+ UnsupportedDeletePipeline {
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Failed to execute script by name: {}", name))]
ExecuteScript {
name: String,
@@ -533,6 +547,27 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to parse payload as json"))]
+ ParseJson {
+ #[snafu(source)]
+ error: serde_json::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to convert to structured log"))]
+ ToStructuredLog {
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Unsupported content type: {:?}", content_type))]
+ UnsupportedContentType {
+ content_type: ContentType,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Failed to decode url"))]
UrlDecode {
#[snafu(source)]
@@ -600,6 +635,7 @@ impl ErrorExt for Error {
| FileWatch { .. } => StatusCode::Internal,
UnsupportedDataType { .. } => StatusCode::Unsupported,
+ UnsupportedDeletePipeline { .. } => StatusCode::Unsupported,
#[cfg(not(windows))]
UpdateJemallocMetrics { .. } => StatusCode::Internal,
@@ -614,6 +650,8 @@ impl ErrorExt for Error {
| ExecuteGrpcRequest { source, .. }
| CheckDatabaseValidity { source, .. } => source.status_code(),
+ Pipeline { source, .. } => source.status_code(),
+
NotSupported { .. }
| InvalidParameter { .. }
| InvalidQuery { .. }
@@ -637,6 +675,9 @@ impl ErrorExt for Error {
| MissingQueryContext { .. }
| MysqlValueConversion { .. }
| UnexpectedPhysicalTable { .. }
+ | ParseJson { .. }
+ | ToStructuredLog { .. }
+ | UnsupportedContentType { .. }
| TimestampOverflow { .. } => StatusCode::InvalidArguments,
RowWriter { source, .. }
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 5ef29b8b38c1..3f7f71653f73 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -67,12 +67,13 @@ use crate::metrics_handler::MetricsHandler;
use crate::prometheus_handler::PrometheusHandlerRef;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
use crate::query_handler::{
- InfluxdbLineProtocolHandlerRef, OpenTelemetryProtocolHandlerRef, OpentsdbProtocolHandlerRef,
- PromStoreProtocolHandlerRef, ScriptHandlerRef,
+ InfluxdbLineProtocolHandlerRef, LogHandlerRef, OpenTelemetryProtocolHandlerRef,
+ OpentsdbProtocolHandlerRef, PromStoreProtocolHandlerRef, ScriptHandlerRef,
};
use crate::server::Server;
pub mod authorize;
+pub mod event;
pub mod handler;
pub mod header;
pub mod influxdb;
@@ -587,6 +588,16 @@ impl HttpServerBuilder {
}
}
+ pub fn with_log_ingest_handler(self, handler: LogHandlerRef) -> Self {
+ Self {
+ router: self.router.nest(
+ &format!("/{HTTP_API_VERSION}/events"),
+ HttpServer::route_log(handler),
+ ),
+ ..self
+ }
+ }
+
pub fn with_plugins(self, plugins: Plugins) -> Self {
Self { plugins, ..self }
}
@@ -699,6 +710,21 @@ impl HttpServer {
.with_state(metrics_handler)
}
+ fn route_log<S>(log_handler: LogHandlerRef) -> Router<S> {
+ Router::new()
+ .route("/logs", routing::post(event::log_ingester))
+ .route(
+ "/pipelines/:pipeline_name",
+ routing::post(event::add_pipeline),
+ )
+ .layer(
+ ServiceBuilder::new()
+ .layer(HandleErrorLayer::new(handle_error))
+ .layer(RequestDecompressionLayer::new()),
+ )
+ .with_state(log_handler)
+ }
+
fn route_sql<S>(api_state: ApiState) -> ApiRouter<S> {
ApiRouter::new()
.api_route(
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
new file mode 100644
index 000000000000..f9939b80572e
--- /dev/null
+++ b/src/servers/src/http/event.rs
@@ -0,0 +1,257 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::result::Result as StdResult;
+
+use api::v1::{RowInsertRequest, RowInsertRequests, Rows};
+use axum::body::HttpBody;
+use axum::extract::{FromRequest, Multipart, Path, Query, State};
+use axum::headers::ContentType;
+use axum::http::header::CONTENT_TYPE;
+use axum::http::{Request, StatusCode};
+use axum::response::{IntoResponse, Response};
+use axum::{async_trait, BoxError, Extension, TypedHeader};
+use common_telemetry::{error, warn};
+use common_time::Timestamp;
+use datatypes::timestamp::TimestampNanosecond;
+use mime_guess::mime;
+use pipeline::error::{CastTypeSnafu, PipelineTransformSnafu};
+use pipeline::table::PipelineVersion;
+use pipeline::Value as PipelineValue;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use serde_json::{Deserializer, Value};
+use session::context::QueryContextRef;
+use snafu::{OptionExt, ResultExt};
+
+use crate::error::{
+ InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu,
+};
+use crate::http::greptime_result_v1::GreptimedbV1Response;
+use crate::http::HttpResponse;
+use crate::query_handler::LogHandlerRef;
+
+#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
+pub struct LogIngesterQueryParams {
+ pub table: Option<String>,
+ pub db: Option<String>,
+ pub pipeline_name: Option<String>,
+ pub ignore_errors: Option<bool>,
+
+ pub version: Option<String>,
+}
+
+pub struct PipelineContent(String);
+
+#[async_trait]
+impl<S, B> FromRequest<S, B> for PipelineContent
+where
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ bytes::Bytes: std::convert::From<<B as HttpBody>::Data>,
+ B::Error: Into<BoxError>,
+ S: Send + Sync,
+{
+ type Rejection = Response;
+
+ async fn from_request(req: Request<B>, state: &S) -> StdResult<Self, Self::Rejection> {
+ let content_type_header = req.headers().get(CONTENT_TYPE);
+ let content_type = content_type_header.and_then(|value| value.to_str().ok());
+ if let Some(content_type) = content_type {
+ if content_type.ends_with("yaml") {
+ let payload = String::from_request(req, state)
+ .await
+ .map_err(IntoResponse::into_response)?;
+ return Ok(Self(payload));
+ }
+
+ if content_type.starts_with("multipart/form-data") {
+ let mut payload: Multipart = Multipart::from_request(req, state)
+ .await
+ .map_err(IntoResponse::into_response)?;
+ let file = payload
+ .next_field()
+ .await
+ .map_err(IntoResponse::into_response)?;
+ let payload = file
+ .ok_or(StatusCode::UNSUPPORTED_MEDIA_TYPE.into_response())?
+ .text()
+ .await
+ .map_err(IntoResponse::into_response)?;
+ return Ok(Self(payload));
+ }
+ }
+
+ Err(StatusCode::UNSUPPORTED_MEDIA_TYPE.into_response())
+ }
+}
+
+#[axum_macros::debug_handler]
+pub async fn add_pipeline(
+ State(handler): State<LogHandlerRef>,
+ Path(pipeline_name): Path<String>,
+ Extension(query_ctx): Extension<QueryContextRef>,
+ PipelineContent(payload): PipelineContent,
+) -> Result<String> {
+ if pipeline_name.is_empty() {
+ return Err(InvalidParameterSnafu {
+ reason: "pipeline_name is required in path",
+ }
+ .build());
+ }
+
+ if payload.is_empty() {
+ return Err(InvalidParameterSnafu {
+ reason: "pipeline is required in body",
+ }
+ .build());
+ }
+
+ let content_type = "yaml";
+ let result = handler
+ .insert_pipeline(&pipeline_name, content_type, &payload, query_ctx)
+ .await;
+
+ result.map(|_| "ok".to_string()).map_err(|e| {
+ error!(e; "failed to insert pipeline");
+ e
+ })
+}
+
+/// Fold a stream of NDJSON values into a single JSON array
+fn transform_ndjson_array_factory(
+ values: impl IntoIterator<Item = StdResult<Value, serde_json::Error>>,
+ ignore_error: bool,
+) -> Result<Value> {
+ values.into_iter().try_fold(
+ Value::Array(Vec::with_capacity(100)),
+ |acc, item| match acc {
+ Value::Array(mut acc_array) => {
+ if let Ok(item_value) = item {
+ match item_value {
+ Value::Array(item_array) => {
+ acc_array.extend(item_array);
+ }
+ Value::Object(_) => {
+ acc_array.push(item_value);
+ }
+ _ => {
+ if !ignore_error {
+ warn!("invalid item in array: {:?}", item_value);
+ return InvalidParameterSnafu {
+ reason: format!("invalid item:{} in array", item_value),
+ }
+ .fail();
+ }
+ }
+ }
+ Ok(Value::Array(acc_array))
+ } else if !ignore_error {
+ item.context(ParseJsonSnafu)
+ } else {
+ warn!("invalid item in array: {:?}", item);
+ Ok(Value::Array(acc_array))
+ }
+ }
+ _ => unreachable!("invalid acc: {:?}", acc),
+ },
+ )
+}
+
+#[axum_macros::debug_handler]
+pub async fn log_ingester(
+ State(handler): State<LogHandlerRef>,
+ Query(query_params): Query<LogIngesterQueryParams>,
+ Extension(query_ctx): Extension<QueryContextRef>,
+ TypedHeader(content_type): TypedHeader<ContentType>,
+ payload: String,
+) -> Result<HttpResponse> {
+ let pipeline_name = query_params.pipeline_name.context(InvalidParameterSnafu {
+ reason: "pipeline_name is required",
+ })?;
+ let table_name = query_params.table.context(InvalidParameterSnafu {
+ reason: "table is required",
+ })?;
+
+ let version = match query_params.version {
+ Some(version) => {
+ let ts = Timestamp::from_str_utc(&version).map_err(|e| {
+ InvalidParameterSnafu {
+ reason: format!("invalid pipeline version: {} with error: {}", &version, e),
+ }
+ .build()
+ })?;
+ Some(TimestampNanosecond(ts))
+ }
+ None => None,
+ };
+
+ let ignore_errors = query_params.ignore_errors.unwrap_or(false);
+
+ let m: mime::Mime = content_type.clone().into();
+ let value = match m.subtype() {
+ mime::JSON => transform_ndjson_array_factory(
+ Deserializer::from_str(&payload).into_iter(),
+ ignore_errors,
+ )?,
+ // add more content type support
+ _ => UnsupportedContentTypeSnafu { content_type }.fail()?,
+ };
+
+ ingest_logs_inner(
+ handler,
+ pipeline_name,
+ version,
+ table_name,
+ value,
+ query_ctx,
+ )
+ .await
+}
+
+async fn ingest_logs_inner(
+ state: LogHandlerRef,
+ pipeline_name: String,
+ version: PipelineVersion,
+ table_name: String,
+ payload: Value,
+ query_ctx: QueryContextRef,
+) -> Result<HttpResponse> {
+ let start = std::time::Instant::now();
+ let pipeline_data = PipelineValue::try_from(payload)
+ .map_err(|reason| CastTypeSnafu { msg: reason }.build())
+ .context(PipelineSnafu)?;
+
+ let pipeline = state
+ .get_pipeline(&pipeline_name, version, query_ctx.clone())
+ .await?;
+ let transformed_data: Rows = pipeline
+ .exec(pipeline_data)
+ .map_err(|reason| PipelineTransformSnafu { reason }.build())
+ .context(PipelineSnafu)?;
+
+ let insert_request = RowInsertRequest {
+ rows: Some(transformed_data),
+ table_name: table_name.clone(),
+ };
+ let insert_requests = RowInsertRequests {
+ inserts: vec![insert_request],
+ };
+ let output = state.insert_logs(insert_requests, query_ctx).await;
+
+ let response = GreptimedbV1Response::from_output(vec![output])
+ .await
+ .with_execution_time(start.elapsed().as_millis() as u64);
+ Ok(response)
+}
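
The NDJSON folding performed by `transform_ndjson_array_factory` can be pictured with this
stripped-down, self-contained sketch; it keeps every parsed value instead of honouring
`ignore_errors`, so it is an illustration rather than a drop-in replacement.

use serde_json::{Deserializer, Value};

fn fold_ndjson(payload: &str) -> serde_json::Result<Value> {
    let mut out = Vec::new();
    for item in Deserializer::from_str(payload).into_iter::<Value>() {
        match item? {
            // Nested arrays are flattened into the accumulator...
            Value::Array(items) => out.extend(items),
            // ...and every other value (objects, in practice) is appended as-is.
            other => out.push(other),
        }
    }
    Ok(Value::Array(out))
}

fn main() {
    let body = "{\"msg\":\"a\"}\n{\"msg\":\"b\"}";
    let folded = fold_ndjson(body).unwrap();
    assert_eq!(folded.as_array().unwrap().len(), 2);
}
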
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index 0430005aed7d..f0c1170e0746 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -35,6 +35,8 @@ use common_query::Output;
use headers::HeaderValue;
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
+use pipeline::table::PipelineVersion;
+use pipeline::{GreptimeTransformer, Pipeline};
use serde_json::Value;
use session::context::QueryContextRef;
pub type InfluxdbLineProtocolHandlerRef = Arc<dyn InfluxdbLineProtocolHandler + Send + Sync>;
pub type PromStoreProtocolHandlerRef = Arc<dyn PromStoreProtocolHandler + Send + Sync>;
pub type OpenTelemetryProtocolHandlerRef = Arc<dyn OpenTelemetryProtocolHandler + Send + Sync>;
pub type ScriptHandlerRef = Arc<dyn ScriptHandler + Send + Sync>;
+pub type LogHandlerRef = Arc<dyn LogHandler + Send + Sync>;
#[async_trait]
pub trait ScriptHandler {
@@ -118,3 +121,29 @@ pub trait OpenTelemetryProtocolHandler {
ctx: QueryContextRef,
) -> Result<Output>;
}
+
+/// LogHandler is responsible for handling log-related requests.
+/// It should be able to insert logs and manage pipelines.
+/// A pipeline is a series of transformations that can be applied to logs;
+/// pipelines are stored in the database and can be retrieved by name.
+#[async_trait]
+pub trait LogHandler {
+ async fn insert_logs(&self, log: RowInsertRequests, ctx: QueryContextRef) -> Result<Output>;
+
+ async fn get_pipeline(
+ &self,
+ name: &str,
+ version: PipelineVersion,
+ query_ctx: QueryContextRef,
+ ) -> Result<Arc<Pipeline<GreptimeTransformer>>>;
+
+ async fn insert_pipeline(
+ &self,
+ name: &str,
+ content_type: &str,
+ pipeline: &str,
+ query_ctx: QueryContextRef,
+ ) -> Result<()>;
+
+ async fn delete_pipeline(&self, name: &str, query_ctx: QueryContextRef) -> Result<()>;
+}
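
A hedged sketch of how a server component holding a LogHandlerRef could drive the new trait end to
end; the function name, the "nginx_access" identifier and the caller-supplied YAML are assumptions,
while the trait methods are exactly the ones declared above.

use api::v1::RowInsertRequests;
use common_query::Output;
use session::context::QueryContextRef;

use crate::error::Result;
use crate::query_handler::LogHandlerRef;

async fn ingest_with_pipeline(
    handler: LogHandlerRef,
    ctx: QueryContextRef,
    pipeline_yaml: &str,
    rows: RowInsertRequests,
) -> Result<Output> {
    // Register (or update) the pipeline, then fetch it back; `None` selects the latest version.
    handler
        .insert_pipeline("nginx_access", "yaml", pipeline_yaml, ctx.clone())
        .await?;
    let _compiled = handler.get_pipeline("nginx_access", None, ctx.clone()).await?;

    // Finally push the (already transformed) rows through the shared insert path.
    handler.insert_logs(rows, ctx).await
}
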
| feat | log ingestion support (#4014) |
| 128c6ec98c57996de08a12e958feb9695afa2204 | 2023-06-19 12:20:33 | LFC | feat: region alive keeper in Datanode (#1780) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 93b9319b079a..71e481b5aee9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -209,8 +209,8 @@ dependencies = [
"greptime-proto",
"prost",
"snafu",
- "tonic 0.9.2",
- "tonic-build 0.9.2",
+ "tonic",
+ "tonic-build",
]
[[package]]
@@ -382,7 +382,7 @@ dependencies = [
"paste",
"prost",
"tokio",
- "tonic 0.9.2",
+ "tonic",
]
[[package]]
@@ -1538,7 +1538,7 @@ dependencies = [
"substrait 0.7.5",
"tokio",
"tokio-stream",
- "tonic 0.9.2",
+ "tonic",
"tracing",
"tracing-subscriber",
]
@@ -1760,7 +1760,7 @@ dependencies = [
"rand",
"snafu",
"tokio",
- "tonic 0.9.2",
+ "tonic",
"tower",
]
@@ -2005,7 +2005,7 @@ checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e"
dependencies = [
"prost",
"prost-types",
- "tonic 0.9.2",
+ "tonic",
"tracing-core",
]
@@ -2027,7 +2027,7 @@ dependencies = [
"thread_local",
"tokio",
"tokio-stream",
- "tonic 0.9.2",
+ "tonic",
"tracing",
"tracing-core",
"tracing-subscriber",
@@ -2647,7 +2647,7 @@ dependencies = [
"tokio",
"tokio-stream",
"toml",
- "tonic 0.9.2",
+ "tonic",
"tower",
"tower-http",
"url",
@@ -3025,16 +3025,16 @@ dependencies = [
[[package]]
name = "etcd-client"
-version = "0.10.4"
+version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4319dc0fb739a6e84cb8678b8cf50c9bcfa4712ae826b33ecf00cc0850550a58"
+checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89"
dependencies = [
"http",
"prost",
"tokio",
"tokio-stream",
- "tonic 0.8.3",
- "tonic-build 0.8.4",
+ "tonic",
+ "tonic-build",
"tower",
"tower-service",
]
@@ -3257,7 +3257,7 @@ dependencies = [
"table",
"tokio",
"toml",
- "tonic 0.9.2",
+ "tonic",
"tower",
"uuid",
]
@@ -4096,13 +4096,13 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=4398d20c56d5f7939cc2960789cb1fa7dd18e6fe#4398d20c56d5f7939cc2960789cb1fa7dd18e6fe"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=aee86f4a68c59873961c9b99ee7ed6a4341bf773#aee86f4a68c59873961c9b99ee7ed6a4341bf773"
dependencies = [
"prost",
"serde",
"serde_json",
- "tonic 0.9.2",
- "tonic-build 0.9.2",
+ "tonic",
+ "tonic-build",
]
[[package]]
@@ -5141,7 +5141,7 @@ dependencies = [
"table",
"tokio",
"tokio-stream",
- "tonic 0.9.2",
+ "tonic",
"tower",
"tracing",
"tracing-subscriber",
@@ -5188,7 +5188,7 @@ dependencies = [
"table",
"tokio",
"tokio-stream",
- "tonic 0.9.2",
+ "tonic",
"tower",
"tracing",
"tracing-subscriber",
@@ -8584,7 +8584,7 @@ dependencies = [
"tokio-rustls 0.24.0",
"tokio-stream",
"tokio-test",
- "tonic 0.9.2",
+ "tonic",
"tonic-reflection",
"tower",
"tower-http",
@@ -9138,8 +9138,8 @@ dependencies = [
"table",
"tokio",
"tokio-util",
- "tonic 0.9.2",
- "tonic-build 0.9.2",
+ "tonic",
+ "tonic-build",
"uuid",
]
@@ -9596,7 +9596,7 @@ dependencies = [
"table",
"tempfile",
"tokio",
- "tonic 0.9.2",
+ "tonic",
"tower",
"uuid",
]
@@ -9971,38 +9971,6 @@ dependencies = [
"winnow",
]
-[[package]]
-name = "tonic"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb"
-dependencies = [
- "async-stream",
- "async-trait",
- "axum",
- "base64 0.13.1",
- "bytes",
- "futures-core",
- "futures-util",
- "h2",
- "http",
- "http-body",
- "hyper",
- "hyper-timeout",
- "percent-encoding",
- "pin-project",
- "prost",
- "prost-derive",
- "tokio",
- "tokio-stream",
- "tokio-util",
- "tower",
- "tower-layer",
- "tower-service",
- "tracing",
- "tracing-futures",
-]
-
[[package]]
name = "tonic"
version = "0.9.2"
@@ -10034,19 +10002,6 @@ dependencies = [
"tracing",
]
-[[package]]
-name = "tonic-build"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4"
-dependencies = [
- "prettyplease 0.1.25",
- "proc-macro2",
- "prost-build",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "tonic-build"
version = "0.9.2"
@@ -10070,7 +10025,7 @@ dependencies = [
"prost-types",
"tokio",
"tokio-stream",
- "tonic 0.9.2",
+ "tonic",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 4a7e192689df..3a3ccf501ddf 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -72,7 +72,7 @@ datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "4398d20c56d5f7939cc2960789cb1fa7dd18e6fe" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "aee86f4a68c59873961c9b99ee7ed6a4341bf773" }
itertools = "0.10"
parquet = "40.0"
paste = "1.0"
diff --git a/src/catalog/src/remote.rs b/src/catalog/src/remote.rs
index 03f068d09bd3..617ec32096ba 100644
--- a/src/catalog/src/remote.rs
+++ b/src/catalog/src/remote.rs
@@ -32,7 +32,7 @@ pub mod mock;
// FIXME(LFC): Used in next PR.
#[allow(dead_code)]
-mod region_alive_keeper;
+pub mod region_alive_keeper;
#[derive(Debug, Clone)]
pub struct Kv(pub Vec<u8>, pub Vec<u8>);
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index a3d754e7be07..6cc2c787997e 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -20,13 +20,14 @@ use std::sync::Arc;
use async_stream::stream;
use async_trait::async_trait;
use common_catalog::consts::{MAX_SYS_TABLE_ID, MITO_ENGINE};
+use common_meta::ident::TableIdent;
use common_telemetry::{debug, error, info, warn};
use dashmap::DashMap;
use futures::Stream;
use futures_util::{StreamExt, TryStreamExt};
use metrics::{decrement_gauge, increment_gauge};
use parking_lot::RwLock;
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::{EngineContext, TableReference};
use table::requests::{CreateTableRequest, OpenTableRequest};
@@ -43,6 +44,7 @@ use crate::helper::{
build_table_regional_prefix, CatalogKey, CatalogValue, SchemaKey, SchemaValue, TableGlobalKey,
TableGlobalValue, TableRegionalKey, TableRegionalValue, CATALOG_KEY_PREFIX,
};
+use crate::remote::region_alive_keeper::RegionAliveKeepers;
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogManager, CatalogProvider, CatalogProviderRef,
@@ -57,16 +59,23 @@ pub struct RemoteCatalogManager {
catalogs: Arc<RwLock<DashMap<String, CatalogProviderRef>>>,
engine_manager: TableEngineManagerRef,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl RemoteCatalogManager {
- pub fn new(engine_manager: TableEngineManagerRef, node_id: u64, backend: KvBackendRef) -> Self {
+ pub fn new(
+ engine_manager: TableEngineManagerRef,
+ node_id: u64,
+ backend: KvBackendRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
+ ) -> Self {
Self {
engine_manager,
node_id,
backend,
catalogs: Default::default(),
system_table_requests: Default::default(),
+ region_alive_keepers,
}
}
@@ -576,34 +585,44 @@ impl CatalogManager for RemoteCatalogManager {
}
async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
- let catalog_name = request.catalog;
- let schema_name = request.schema;
+ let catalog = &request.catalog;
+ let schema = &request.schema;
+ let table_name = &request.table_name;
+
let schema_provider = self
- .catalog(&catalog_name)
+ .catalog(catalog)
.await?
.context(CatalogNotFoundSnafu {
- catalog_name: &catalog_name,
+ catalog_name: catalog,
})?
- .schema(&schema_name)
+ .schema(schema)
.await?
- .with_context(|| SchemaNotFoundSnafu {
- catalog: &catalog_name,
- schema: &schema_name,
- })?;
- if schema_provider.table_exist(&request.table_name).await? {
- return TableExistsSnafu {
- table: format!("{}.{}.{}", &catalog_name, &schema_name, &request.table_name),
+ .context(SchemaNotFoundSnafu { catalog, schema })?;
+ ensure!(
+ !schema_provider.table_exist(table_name).await?,
+ TableExistsSnafu {
+ table: common_catalog::format_full_table_name(catalog, schema, table_name),
}
- .fail();
- }
+ );
increment_gauge!(
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
1.0,
- &[crate::metrics::db_label(&catalog_name, &schema_name)],
+ &[crate::metrics::db_label(catalog, schema)],
);
schema_provider
- .register_table(request.table_name, request.table)
+ .register_table(table_name.to_string(), request.table.clone())
+ .await?;
+
+ let table_ident = TableIdent {
+ catalog: request.catalog,
+ schema: request.schema,
+ table: request.table_name,
+ table_id: request.table_id,
+ engine: request.table.table_info().meta.engine.clone(),
+ };
+ self.region_alive_keepers
+ .register_table(table_ident, request.table)
.await?;
Ok(true)
@@ -626,6 +645,21 @@ impl CatalogManager for RemoteCatalogManager {
1.0,
&[crate::metrics::db_label(catalog_name, schema_name)],
);
+
+ if let Some(table) = result.as_ref() {
+ let table_info = table.table_info();
+ let table_ident = TableIdent {
+ catalog: request.catalog,
+ schema: request.schema,
+ table: request.table_name,
+ table_id: table_info.ident.table_id,
+ engine: table_info.meta.engine.clone(),
+ };
+ self.region_alive_keepers
+ .deregister_table(&table_ident)
+ .await;
+ }
+
Ok(result.is_none())
}
diff --git a/src/catalog/src/remote/region_alive_keeper.rs b/src/catalog/src/remote/region_alive_keeper.rs
index a291fe9337de..51192c9d889c 100644
--- a/src/catalog/src/remote/region_alive_keeper.rs
+++ b/src/catalog/src/remote/region_alive_keeper.rs
@@ -12,15 +12,193 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_meta::instruction::TableIdent;
+use std::collections::HashMap;
+use std::future::Future;
+use std::sync::Arc;
+
+use common_meta::ident::TableIdent;
+use common_meta::RegionIdent;
use common_telemetry::{debug, error, info, warn};
+use snafu::ResultExt;
use store_api::storage::RegionNumber;
+use table::engine::manager::TableEngineManagerRef;
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
use table::requests::CloseTableRequest;
-use tokio::sync::mpsc;
+use table::TableRef;
+use tokio::sync::{mpsc, Mutex};
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};
+use crate::error::{Result, TableEngineNotFoundSnafu};
+
+/// [RegionAliveKeepers] manages a [RegionAliveKeeper] for each registered table.
+pub struct RegionAliveKeepers {
+ table_engine_manager: TableEngineManagerRef,
+ keepers: Arc<Mutex<HashMap<TableIdent, Arc<RegionAliveKeeper>>>>,
+}
+
+impl RegionAliveKeepers {
+ pub fn new(table_engine_manager: TableEngineManagerRef) -> Self {
+ Self {
+ table_engine_manager,
+ keepers: Arc::new(Mutex::new(HashMap::new())),
+ }
+ }
+
+ async fn find_keeper(&self, table_ident: &TableIdent) -> Option<Arc<RegionAliveKeeper>> {
+ self.keepers.lock().await.get(table_ident).cloned()
+ }
+
+ pub(crate) async fn register_table(
+ &self,
+ table_ident: TableIdent,
+ table: TableRef,
+ ) -> Result<()> {
+ let keeper = self.find_keeper(&table_ident).await;
+ if keeper.is_some() {
+ return Ok(());
+ }
+
+ let table_engine = self
+ .table_engine_manager
+ .engine(&table_ident.engine)
+ .context(TableEngineNotFoundSnafu {
+ engine_name: &table_ident.engine,
+ })?;
+
+ let keeper = Arc::new(RegionAliveKeeper::new(table_engine, table_ident.clone()));
+ for r in table.table_info().meta.region_numbers.iter() {
+ keeper.register_region(*r).await;
+ }
+
+ info!("Register RegionAliveKeeper for table {table_ident}");
+ self.keepers.lock().await.insert(table_ident, keeper);
+ Ok(())
+ }
+
+ pub(crate) async fn deregister_table(&self, table_ident: &TableIdent) {
+ if self.keepers.lock().await.remove(table_ident).is_some() {
+ info!("Deregister RegionAliveKeeper for table {table_ident}");
+ }
+ }
+
+ pub async fn register_region(&self, region_ident: &RegionIdent) {
+ let table_ident = ®ion_ident.table_ident;
+ let Some(keeper) = self.find_keeper(table_ident).await else {
+            // The alive keeper might be missing due to lagging messages; just warn and ignore.
+ warn!("Alive keeper for region {region_ident} is not found!");
+ return;
+ };
+ keeper.register_region(region_ident.region_number).await
+ }
+
+ pub async fn deregister_region(&self, region_ident: &RegionIdent) {
+ let table_ident = ®ion_ident.table_ident;
+ let Some(keeper) = self.find_keeper(table_ident).await else {
+            // The alive keeper might be missing due to lagging messages; just warn and ignore.
+ warn!("Alive keeper for region {region_ident} is not found!");
+ return;
+ };
+ keeper.deregister_region(region_ident.region_number).await
+ }
+
+ pub async fn start(&self, heartbeat_interval_millis: u64) {
+ for keeper in self.keepers.lock().await.values() {
+ keeper.start(heartbeat_interval_millis).await;
+ }
+ }
+}
+
+/// [RegionAliveKeeper] starts a countdown for each region in a table. When the deadline is reached,
+/// the region will be closed.
+/// The deadline is controlled by Metasrv. It works like a "lease" for regions: a Datanode submits its
+/// opened regions to Metasrv in heartbeats. If Metasrv decides a region should reside in this
+/// Datanode, it "extends" the region's "lease" with a new deadline for [RegionAliveKeeper] to
+/// count down against.
+struct RegionAliveKeeper {
+ table_engine: TableEngineRef,
+ table_ident: TableIdent,
+ countdown_task_handles: Arc<Mutex<HashMap<RegionNumber, Arc<CountdownTaskHandle>>>>,
+}
+
+impl RegionAliveKeeper {
+ fn new(table_engine: TableEngineRef, table_ident: TableIdent) -> Self {
+ Self {
+ table_engine,
+ table_ident,
+ countdown_task_handles: Arc::new(Mutex::new(HashMap::new())),
+ }
+ }
+
+ async fn find_handle(&self, region: &RegionNumber) -> Option<Arc<CountdownTaskHandle>> {
+ self.countdown_task_handles
+ .lock()
+ .await
+ .get(region)
+ .cloned()
+ }
+
+ async fn register_region(&self, region: RegionNumber) {
+ if self.find_handle(®ion).await.is_some() {
+ return;
+ }
+
+ let countdown_task_handles = self.countdown_task_handles.clone();
+ let on_task_finished = async move {
+ let _ = countdown_task_handles.lock().await.remove(®ion);
+ };
+ let handle = Arc::new(CountdownTaskHandle::new(
+ self.table_engine.clone(),
+ self.table_ident.clone(),
+ region,
+ || on_task_finished,
+ ));
+
+ self.countdown_task_handles
+ .lock()
+ .await
+ .insert(region, handle);
+ info!(
+ "Register alive countdown for new region {region} in table {}",
+ self.table_ident
+ )
+ }
+
+ async fn deregister_region(&self, region: RegionNumber) {
+ if self
+ .countdown_task_handles
+ .lock()
+ .await
+ .remove(®ion)
+ .is_some()
+ {
+ info!(
+ "Deregister alive countdown for region {region} in table {}",
+ self.table_ident
+ )
+ }
+ }
+
+ async fn start(&self, heartbeat_interval_millis: u64) {
+ for handle in self.countdown_task_handles.lock().await.values() {
+ handle.start(heartbeat_interval_millis).await;
+ }
+ info!(
+ "RegionAliveKeeper for table {} is started!",
+ self.table_ident
+ )
+ }
+
+ async fn keep_lived(&self, designated_regions: Vec<RegionNumber>, deadline: Instant) {
+ for region in designated_regions {
+ if let Some(handle) = self.find_handle(®ion).await {
+ handle.reset_deadline(deadline).await;
+ }
+            // Else the region alive keeper might have been triggered by lagging messages; we can safely ignore it.
+ }
+ }
+}
+
#[derive(Debug)]
enum CountdownCommand {
Start(u64),
@@ -42,14 +220,14 @@ impl CountdownTaskHandle {
/// be invoked if the task is cancelled (by dropping the handle). This is because we want something
/// meaningful to be done when the task is finished, e.g. deregister the handle from the map.
/// While dropping the handle does not necessarily mean the task is finished.
- fn new<F>(
+ fn new<Fut>(
table_engine: TableEngineRef,
table_ident: TableIdent,
region: RegionNumber,
- on_task_finished: F,
+ on_task_finished: impl FnOnce() -> Fut + Send + 'static,
) -> Self
where
- F: FnOnce() + Send + 'static,
+ Fut: Future<Output = ()> + Send,
{
let (tx, rx) = mpsc::channel(1024);
@@ -60,7 +238,8 @@ impl CountdownTaskHandle {
rx,
};
let handler = common_runtime::spawn_bg(async move {
- countdown_task.run(on_task_finished).await;
+ countdown_task.run().await;
+ on_task_finished().await;
});
Self { tx, handler }
@@ -103,10 +282,7 @@ struct CountdownTask {
}
impl CountdownTask {
- async fn run<F>(&mut self, on_task_finished: F)
- where
- F: FnOnce() + Send + 'static,
- {
+ async fn run(&mut self) {
// 30 years. See `Instant::far_future`.
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
@@ -133,16 +309,8 @@ impl CountdownTask {
debug!("Reset deadline to region {region} of table {table_ident} to {deadline:?}");
countdown.set(tokio::time::sleep_until(deadline));
}
- // Else we have received a past deadline, it could be the following
- // possible reasons:
- // 1. the clock drift happened in Metasrv or Datanode;
- // 2. some messages are lagged;
- // 3. during the period of Datanode startup.
- // We can safely ignore case 2 and 3. However, case 1 is catastrophic.
- // We must think of a way to resolve it, maybe using logical clock, or
- // simply fire an alarm for it? For now, we can tolerate that, because it's
- // seconds resolution to deadline, and clock drift is less likely
- // to happen in that resolution.
+                            // Else the countdown might not have started yet, or we are still within the
+                            // startup protection period. Either case can be safely ignored.
},
None => {
info!(
@@ -168,8 +336,6 @@ impl CountdownTask {
}
}
}
-
- on_task_finished();
}
async fn close_region(&self) -> CloseTableResult {
@@ -202,12 +368,142 @@ mod test {
use std::sync::Arc;
use datatypes::schema::RawSchema;
+ use table::engine::manager::MemoryTableEngineManager;
use table::engine::{TableEngine, TableReference};
use table::requests::{CreateTableRequest, TableOptions};
+ use table::test_util::EmptyTable;
+ use tokio::sync::oneshot;
use super::*;
use crate::remote::mock::MockTableEngine;
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_region_alive_keepers() {
+ let table_engine = Arc::new(MockTableEngine::default());
+ let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
+ let keepers = RegionAliveKeepers::new(table_engine_manager);
+
+ let catalog = "my_catalog";
+ let schema = "my_schema";
+ let table = "my_table";
+ let table_ident = TableIdent {
+ catalog: catalog.to_string(),
+ schema: schema.to_string(),
+ table: table.to_string(),
+ table_id: 1,
+ engine: "MockTableEngine".to_string(),
+ };
+ let table = Arc::new(EmptyTable::new(CreateTableRequest {
+ id: 1,
+ catalog_name: catalog.to_string(),
+ schema_name: schema.to_string(),
+ table_name: table.to_string(),
+ desc: None,
+ schema: RawSchema {
+ column_schemas: vec![],
+ timestamp_index: None,
+ version: 0,
+ },
+ region_numbers: vec![1, 2, 3],
+ primary_key_indices: vec![],
+ create_if_not_exists: false,
+ table_options: TableOptions::default(),
+ engine: "MockTableEngine".to_string(),
+ }));
+
+ keepers
+ .register_table(table_ident.clone(), table)
+ .await
+ .unwrap();
+ assert!(keepers.keepers.lock().await.contains_key(&table_ident));
+
+ keepers
+ .register_region(&RegionIdent {
+ cluster_id: 1,
+ datanode_id: 1,
+ table_ident: table_ident.clone(),
+ region_number: 4,
+ })
+ .await;
+
+ keepers.start(5000).await;
+ for keeper in keepers.keepers.lock().await.values() {
+ for handle in keeper.countdown_task_handles.lock().await.values() {
+ // assert countdown tasks are started
+ assert!(deadline(&handle.tx).await <= Instant::now() + Duration::from_secs(20));
+ }
+ }
+
+ keepers
+ .deregister_region(&RegionIdent {
+ cluster_id: 1,
+ datanode_id: 1,
+ table_ident: table_ident.clone(),
+ region_number: 1,
+ })
+ .await;
+ let mut regions = keepers
+ .find_keeper(&table_ident)
+ .await
+ .unwrap()
+ .countdown_task_handles
+ .lock()
+ .await
+ .keys()
+ .copied()
+ .collect::<Vec<_>>();
+ regions.sort();
+ assert_eq!(regions, vec![2, 3, 4]);
+
+ keepers.deregister_table(&table_ident).await;
+ assert!(keepers.keepers.lock().await.is_empty());
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_region_alive_keeper() {
+ let table_engine = Arc::new(MockTableEngine::default());
+ let table_ident = TableIdent {
+ catalog: "my_catalog".to_string(),
+ schema: "my_schema".to_string(),
+ table: "my_table".to_string(),
+ table_id: 1024,
+ engine: "mito".to_string(),
+ };
+ let keeper = RegionAliveKeeper::new(table_engine, table_ident);
+
+ let region = 1;
+ assert!(keeper.find_handle(®ion).await.is_none());
+ keeper.register_region(region).await;
+ assert!(keeper.find_handle(®ion).await.is_some());
+
+ let sender = &keeper
+ .countdown_task_handles
+ .lock()
+ .await
+ .get(®ion)
+ .unwrap()
+ .tx
+ .clone();
+
+ let ten_seconds_later = || Instant::now() + Duration::from_secs(10);
+
+ keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
+ assert!(keeper.find_handle(&2).await.is_none());
+ assert!(keeper.find_handle(&3).await.is_none());
+
+ let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
+ // assert if keeper is not started, keep_lived is of no use
+ assert!(deadline(sender).await > far_future);
+
+ keeper.start(1000).await;
+ keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
+ // assert keep_lived works if keeper is started
+ assert!(deadline(sender).await <= ten_seconds_later());
+
+ keeper.deregister_region(region).await;
+ assert!(keeper.find_handle(®ion).await.is_none());
+ }
+
#[tokio::test(flavor = "multi_thread")]
async fn test_countdown_task_handle() {
let table_engine = Arc::new(MockTableEngine::default());
@@ -220,10 +516,12 @@ mod test {
};
let finished = Arc::new(AtomicBool::new(false));
let finished_clone = finished.clone();
- let handle =
- CountdownTaskHandle::new(table_engine.clone(), table_ident.clone(), 1, move || {
- finished_clone.store(true, Ordering::Relaxed)
- });
+ let handle = CountdownTaskHandle::new(
+ table_engine.clone(),
+ table_ident.clone(),
+ 1,
+ || async move { finished_clone.store(true, Ordering::Relaxed) },
+ );
let tx = handle.tx.clone();
// assert countdown task is running
@@ -244,7 +542,7 @@ mod test {
let finished = Arc::new(AtomicBool::new(false));
let finished_clone = finished.clone();
- let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, move || {
+ let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, || async move {
finished_clone.store(true, Ordering::Relaxed)
});
handle.tx.send(CountdownCommand::Start(100)).await.unwrap();
@@ -296,15 +594,9 @@ mod test {
rx,
};
common_runtime::spawn_bg(async move {
- task.run(|| ()).await;
+ task.run().await;
});
- async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
- let (s, r) = tokio::sync::oneshot::channel();
- tx.send(CountdownCommand::Deadline(s)).await.unwrap();
- r.await.unwrap()
- }
-
// if countdown task is not started, its deadline is set to far future
assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));
@@ -326,4 +618,10 @@ mod test {
tokio::time::sleep(Duration::from_millis(2000)).await;
assert!(!table_engine.table_exists(ctx, &table_ref));
}
+
+ async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
+ let (s, r) = oneshot::channel();
+ tx.send(CountdownCommand::Deadline(s)).await.unwrap();
+ r.await.unwrap()
+ }
}
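
The lease/countdown mechanism above can be modelled with a few lines of plain tokio. This standalone
sketch is an assumption-level illustration, not the production CountdownTask: an initial deadline
closes the "region" unless heartbeat messages keep pushing it forward.

use tokio::sync::mpsc;
use tokio::time::{sleep_until, Duration, Instant};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Instant>(8);

    let task = tokio::spawn(async move {
        // Initial "lease" of 100 ms; every message on `rx` may extend it.
        let countdown = sleep_until(Instant::now() + Duration::from_millis(100));
        tokio::pin!(countdown);
        loop {
            tokio::select! {
                Some(deadline) = rx.recv() => {
                    // Only ever move the deadline forward, mirroring the lease extension.
                    if deadline > countdown.deadline() {
                        countdown.as_mut().reset(deadline);
                    }
                }
                () = &mut countdown => {
                    println!("lease expired, closing region");
                    break;
                }
            }
        }
    });

    // Extend the lease once, then stop heartbeating and let it expire.
    tx.send(Instant::now() + Duration::from_millis(200)).await.unwrap();
    task.await.unwrap();
}
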
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 324bae49f232..776c6be6c901 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -22,6 +22,7 @@ mod tests {
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::mock::{MockKvBackend, MockTableEngine};
+ use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{
CachedMetaKvBackend, KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider,
RemoteSchemaProvider,
@@ -138,8 +139,12 @@ mod tests {
table_engine.clone(),
));
- let catalog_manager =
- RemoteCatalogManager::new(engine_manager.clone(), node_id, cached_backend.clone());
+ let catalog_manager = RemoteCatalogManager::new(
+ engine_manager.clone(),
+ node_id,
+ cached_backend.clone(),
+ Arc::new(RegionAliveKeepers::new(engine_manager.clone())),
+ );
catalog_manager.start().await.unwrap();
(
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 20c9edbb7cc6..a75234fd924e 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -52,6 +52,9 @@ pub enum Error {
err_msg: String,
location: Location,
},
+
+ #[snafu(display("Invalid protobuf message, err: {}", err_msg))]
+ InvalidProtoMsg { err_msg: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -61,7 +64,10 @@ impl ErrorExt for Error {
use Error::*;
match self {
IllegalServerState { .. } => StatusCode::Internal,
- SerdeJson { .. } | RouteInfoCorrupted { .. } => StatusCode::Unexpected,
+
+ SerdeJson { .. } | RouteInfoCorrupted { .. } | InvalidProtoMsg { .. } => {
+ StatusCode::Unexpected
+ }
SendMessage { .. } => StatusCode::Internal,
diff --git a/src/common/meta/src/ident.rs b/src/common/meta/src/ident.rs
new file mode 100644
index 000000000000..cfc08fa7bc83
--- /dev/null
+++ b/src/common/meta/src/ident.rs
@@ -0,0 +1,57 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{Display, Formatter};
+
+use api::v1::meta::TableIdent as RawTableIdent;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+
+use crate::error::{Error, InvalidProtoMsgSnafu};
+
+#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct TableIdent {
+ pub catalog: String,
+ pub schema: String,
+ pub table: String,
+ pub table_id: u32,
+ pub engine: String,
+}
+
+impl Display for TableIdent {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "Table(id={}, name='{}.{}.{}', engine='{}')",
+ self.table_id, self.catalog, self.schema, self.table, self.engine,
+ )
+ }
+}
+
+impl TryFrom<RawTableIdent> for TableIdent {
+ type Error = Error;
+
+ fn try_from(value: RawTableIdent) -> Result<Self, Self::Error> {
+ let table_name = value.table_name.context(InvalidProtoMsgSnafu {
+ err_msg: "'table_name' is missing in TableIdent",
+ })?;
+ Ok(Self {
+ catalog: table_name.catalog_name,
+ schema: table_name.schema_name,
+ table: table_name.table_name,
+ table_id: value.table_id,
+ engine: value.engine,
+ })
+ }
+}
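
A minimal, self-contained sketch of the Display format the new common_meta::ident::TableIdent produces. The struct is re-declared locally here purely for illustration; the real type additionally implements TryFrom<api::v1::meta::TableIdent> as shown above, and the catalog/schema/table names and table id below are made-up sample values.

```rust
use std::fmt::{Display, Formatter};

// Local re-declaration for illustration only; the real type lives in common_meta::ident.
#[derive(Debug, Clone)]
struct TableIdent {
    catalog: String,
    schema: String,
    table: String,
    table_id: u32,
    engine: String,
}

impl Display for TableIdent {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Same format string as the impl introduced in ident.rs above.
        write!(
            f,
            "Table(id={}, name='{}.{}.{}', engine='{}')",
            self.table_id, self.catalog, self.schema, self.table, self.engine,
        )
    }
}

fn main() {
    // Hypothetical sample values, not taken from the repository.
    let ident = TableIdent {
        catalog: "greptime".to_string(),
        schema: "public".to_string(),
        table: "monitor".to_string(),
        table_id: 1024,
        engine: "mito".to_string(),
    };
    assert_eq!(
        ident.to_string(),
        "Table(id=1024, name='greptime.public.monitor', engine='mito')"
    );
    println!("{ident}");
}
```
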
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index 0b9bce4bdd62..da09f81b1b72 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -16,6 +16,7 @@ use std::fmt::{Display, Formatter};
use serde::{Deserialize, Serialize};
+use crate::ident::TableIdent;
use crate::{ClusterId, DatanodeId};
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
@@ -49,25 +50,6 @@ impl From<RegionIdent> for TableIdent {
}
}
-#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
-pub struct TableIdent {
- pub catalog: String,
- pub schema: String,
- pub table: String,
- pub table_id: u32,
- pub engine: String,
-}
-
-impl Display for TableIdent {
- fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- write!(
- f,
- "TableIdent(table_id='{}', table_name='{}.{}.{}', table_engine='{}')",
- self.table_id, self.catalog, self.schema, self.table, self.engine,
- )
- }
-}
-
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct SimpleReply {
pub result: bool,
diff --git a/src/common/meta/src/lib.rs b/src/common/meta/src/lib.rs
index b49a7c4620f9..7659bfed2c13 100644
--- a/src/common/meta/src/lib.rs
+++ b/src/common/meta/src/lib.rs
@@ -14,6 +14,7 @@
pub mod error;
pub mod heartbeat;
+pub mod ident;
pub mod instruction;
pub mod key;
pub mod peer;
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index fa3e362df0bf..c240af2c55b7 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -131,6 +131,9 @@ impl HeartbeatTask {
let addr = resolve_addr(&self.server_addr, &self.server_hostname);
info!("Starting heartbeat to Metasrv with interval {interval}. My node id is {node_id}, address is {addr}.");
+ // TODO(LFC): Continued in next PR.
+ // self.region_alive_keepers.start(interval).await;
+
let meta_client = self.meta_client.clone();
let catalog_manager_clone = self.catalog_manager.clone();
diff --git a/src/datanode/src/heartbeat/handler/close_region.rs b/src/datanode/src/heartbeat/handler/close_region.rs
index 6fac2cf5da33..638c1aa014e8 100644
--- a/src/datanode/src/heartbeat/handler/close_region.rs
+++ b/src/datanode/src/heartbeat/handler/close_region.rs
@@ -14,15 +14,14 @@
use std::sync::Arc;
+use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::{CatalogManagerRef, DeregisterTableRequest};
use common_catalog::format_full_table_name;
use common_meta::error::Result as MetaResult;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
-use common_meta::instruction::{
- Instruction, InstructionReply, RegionIdent, SimpleReply, TableIdent,
-};
+use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_telemetry::{error, info, warn};
use snafu::ResultExt;
use store_api::storage::RegionNumber;
@@ -36,6 +35,7 @@ use crate::error::{self, Result};
pub struct CloseRegionHandler {
catalog_manager: CatalogManagerRef,
table_engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl HeartbeatResponseHandler for CloseRegionHandler {
@@ -53,29 +53,26 @@ impl HeartbeatResponseHandler for CloseRegionHandler {
let mailbox = ctx.mailbox.clone();
let self_ref = Arc::new(self.clone());
-
- let RegionIdent {
- table_ident:
- TableIdent {
- engine,
- catalog,
- schema,
- table,
- ..
- },
- region_number,
- ..
- } = region_ident;
-
+ let region_alive_keepers = self.region_alive_keepers.clone();
common_runtime::spawn_bg(async move {
+ let table_ident = ®ion_ident.table_ident;
+ let table_ref = TableReference::full(
+ &table_ident.catalog,
+ &table_ident.schema,
+ &table_ident.table,
+ );
let result = self_ref
.close_region_inner(
- engine,
- &TableReference::full(&catalog, &schema, &table),
- vec![region_number],
+ table_ident.engine.clone(),
+ &table_ref,
+ vec![region_ident.region_number],
)
.await;
+ if matches!(result, Ok(true)) {
+ region_alive_keepers.deregister_region(®ion_ident).await;
+ }
+
if let Err(e) = mailbox
.send((meta, CloseRegionHandler::map_result(result)))
.await
@@ -92,10 +89,12 @@ impl CloseRegionHandler {
pub fn new(
catalog_manager: CatalogManagerRef,
table_engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Self {
Self {
catalog_manager,
table_engine_manager,
+ region_alive_keepers,
}
}
diff --git a/src/datanode/src/heartbeat/handler/open_region.rs b/src/datanode/src/heartbeat/handler/open_region.rs
index 361b50e279a0..71b4863f6d64 100644
--- a/src/datanode/src/heartbeat/handler/open_region.rs
+++ b/src/datanode/src/heartbeat/handler/open_region.rs
@@ -15,15 +15,14 @@
use std::sync::Arc;
use catalog::error::Error as CatalogError;
+use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::{CatalogManagerRef, RegisterTableRequest};
use common_catalog::format_full_table_name;
use common_meta::error::Result as MetaResult;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
-use common_meta::instruction::{
- Instruction, InstructionReply, RegionIdent, SimpleReply, TableIdent,
-};
+use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_telemetry::{error, warn};
use snafu::ResultExt;
use store_api::storage::RegionNumber;
@@ -37,6 +36,7 @@ use crate::error::{self, Result};
pub struct OpenRegionHandler {
catalog_manager: CatalogManagerRef,
table_engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl HeartbeatResponseHandler for OpenRegionHandler {
@@ -55,9 +55,24 @@ impl HeartbeatResponseHandler for OpenRegionHandler {
let mailbox = ctx.mailbox.clone();
let self_ref = Arc::new(self.clone());
+ let region_alive_keepers = self.region_alive_keepers.clone();
common_runtime::spawn_bg(async move {
- let (engine, request) = OpenRegionHandler::prepare_request(region_ident);
- let result = self_ref.open_region_inner(engine, request).await;
+ let table_ident = ®ion_ident.table_ident;
+ let request = OpenTableRequest {
+ catalog_name: table_ident.catalog.clone(),
+ schema_name: table_ident.schema.clone(),
+ table_name: table_ident.table.clone(),
+ table_id: table_ident.table_id,
+ region_numbers: vec![region_ident.region_number],
+ };
+ let result = self_ref
+ .open_region_inner(table_ident.engine.clone(), request)
+ .await;
+
+ if matches!(result, Ok(true)) {
+ region_alive_keepers.register_region(®ion_ident).await;
+ }
+
if let Err(e) = mailbox
.send((meta, OpenRegionHandler::map_result(result)))
.await
@@ -73,10 +88,12 @@ impl OpenRegionHandler {
pub fn new(
catalog_manager: CatalogManagerRef,
table_engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Self {
Self {
catalog_manager,
table_engine_manager,
+ region_alive_keepers,
}
}
@@ -97,32 +114,6 @@ impl OpenRegionHandler {
)
}
- fn prepare_request(ident: RegionIdent) -> (String, OpenTableRequest) {
- let RegionIdent {
- table_ident:
- TableIdent {
- catalog,
- schema,
- table,
- table_id,
- engine,
- },
- region_number,
- ..
- } = ident;
-
- (
- engine,
- OpenTableRequest {
- catalog_name: catalog,
- schema_name: schema,
- table_name: table,
- table_id,
- region_numbers: vec![region_number],
- },
- )
- }
-
/// Returns true if a table or target regions have been opened.
async fn regions_opened(
&self,
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index aa00d6423b2f..72336f95b7a1 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -18,7 +18,8 @@ use std::time::Duration;
use std::{fs, path};
use api::v1::meta::Role;
-use catalog::remote::CachedMetaKvBackend;
+use catalog::remote::region_alive_keeper::RegionAliveKeepers;
+use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_base::paths::{CLUSTER_DIR, WAL_DIR};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
@@ -56,9 +57,9 @@ use table::Table;
use crate::datanode::{DatanodeOptions, ObjectStoreConfig, ProcedureConfig, WalConfig};
use crate::error::{
- self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
- NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result, ShutdownInstanceSnafu,
- StartProcedureManagerSnafu, StopProcedureManagerSnafu,
+ self, CatalogSnafu, IncorrectInternalStateSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu,
+ MissingNodeIdSnafu, NewCatalogSnafu, OpenLogStoreSnafu, RecoverProcedureSnafu, Result,
+ ShutdownInstanceSnafu, StartProcedureManagerSnafu, StopProcedureManagerSnafu,
};
use crate::heartbeat::handler::close_region::CloseRegionHandler;
use crate::heartbeat::handler::open_region::OpenRegionHandler;
@@ -150,7 +151,7 @@ impl Instance {
);
// create remote catalog manager
- let (catalog_manager, table_id_provider) = match opts.mode {
+ let (catalog_manager, table_id_provider, heartbeat_task) = match opts.mode {
Mode::Standalone => {
if opts.enable_memory_catalog {
let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
@@ -170,6 +171,7 @@ impl Instance {
(
catalog.clone() as CatalogManagerRef,
Some(catalog as TableIdProviderRef),
+ None,
)
} else {
let catalog = Arc::new(
@@ -181,51 +183,58 @@ impl Instance {
(
catalog.clone() as CatalogManagerRef,
Some(catalog as TableIdProviderRef),
+ None,
)
}
}
Mode::Distributed => {
- let kv_backend = Arc::new(CachedMetaKvBackend::new(
- meta_client.as_ref().unwrap().clone(),
- ));
+ let meta_client = meta_client.context(IncorrectInternalStateSnafu {
+ state: "meta client is not provided when creating distributed Datanode",
+ })?;
+
+ let kv_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
- let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
+ let region_alive_keepers =
+ Arc::new(RegionAliveKeepers::new(engine_manager.clone()));
+
+ let catalog_manager = Arc::new(RemoteCatalogManager::new(
engine_manager.clone(),
opts.node_id.context(MissingNodeIdSnafu)?,
kv_backend,
+ region_alive_keepers.clone(),
));
- (catalog as CatalogManagerRef, None)
+
+ let handlers_executor = HandlerGroupExecutor::new(vec![
+ Arc::new(ParseMailboxMessageHandler::default()),
+ Arc::new(OpenRegionHandler::new(
+ catalog_manager.clone(),
+ engine_manager.clone(),
+ region_alive_keepers.clone(),
+ )),
+ Arc::new(CloseRegionHandler::new(
+ catalog_manager.clone(),
+ engine_manager.clone(),
+ region_alive_keepers,
+ )),
+ ]);
+
+ let heartbeat_task = Some(HeartbeatTask::new(
+ opts.node_id.context(MissingNodeIdSnafu)?,
+ opts.rpc_addr.clone(),
+ opts.rpc_hostname.clone(),
+ meta_client,
+ catalog_manager.clone(),
+ Arc::new(handlers_executor),
+ ));
+
+ (catalog_manager as CatalogManagerRef, None, heartbeat_task)
}
};
let factory = QueryEngineFactory::new(catalog_manager.clone(), false);
let query_engine = factory.query_engine();
- let handlers_executor = HandlerGroupExecutor::new(vec![
- Arc::new(ParseMailboxMessageHandler::default()),
- Arc::new(OpenRegionHandler::new(
- catalog_manager.clone(),
- engine_manager.clone(),
- )),
- Arc::new(CloseRegionHandler::new(
- catalog_manager.clone(),
- engine_manager.clone(),
- )),
- ]);
-
- let heartbeat_task = match opts.mode {
- Mode::Standalone => None,
- Mode::Distributed => Some(HeartbeatTask::new(
- opts.node_id.context(MissingNodeIdSnafu)?,
- opts.rpc_addr.clone(),
- opts.rpc_hostname.clone(),
- meta_client.as_ref().unwrap().clone(),
- catalog_manager.clone(),
- Arc::new(handlers_executor),
- )),
- };
-
let procedure_manager =
create_procedure_manager(opts.node_id.unwrap_or(0), &opts.procedure, object_store)
.await?;
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index e17c18914223..6a278cefa4a2 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -19,14 +19,14 @@ use api::v1::greptime_request::Request as GrpcRequest;
use api::v1::meta::HeartbeatResponse;
use api::v1::query_request::Query;
use api::v1::QueryRequest;
+use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::CatalogManagerRef;
use common_meta::heartbeat::handler::{
HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
-use common_meta::instruction::{
- Instruction, InstructionReply, RegionIdent, SimpleReply, TableIdent,
-};
+use common_meta::ident::TableIdent;
+use common_meta::instruction::{Instruction, InstructionReply, RegionIdent, SimpleReply};
use common_query::Output;
use datatypes::prelude::ConcreteDataType;
use servers::query_handler::grpc::GrpcQueryHandler;
@@ -61,7 +61,11 @@ async fn test_close_region_handler() {
} = prepare_handler_test("test_close_region_handler").await;
let executor = Arc::new(HandlerGroupExecutor::new(vec![Arc::new(
- CloseRegionHandler::new(catalog_manager_ref.clone(), engine_manager_ref.clone()),
+ CloseRegionHandler::new(
+ catalog_manager_ref.clone(),
+ engine_manager_ref.clone(),
+ Arc::new(RegionAliveKeepers::new(engine_manager_ref.clone())),
+ ),
)]));
prepare_table(instance.inner()).await;
@@ -127,14 +131,18 @@ async fn test_open_region_handler() {
..
} = prepare_handler_test("test_open_region_handler").await;
+ let region_alive_keeper = Arc::new(RegionAliveKeepers::new(engine_manager_ref.clone()));
+
let executor = Arc::new(HandlerGroupExecutor::new(vec![
Arc::new(OpenRegionHandler::new(
catalog_manager_ref.clone(),
engine_manager_ref.clone(),
+ region_alive_keeper.clone(),
)),
Arc::new(CloseRegionHandler::new(
catalog_manager_ref.clone(),
engine_manager_ref.clone(),
+ region_alive_keeper,
)),
]));
diff --git a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
index 92d905fdbbfe..111c1ae86f2f 100644
--- a/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
+++ b/src/frontend/src/heartbeat/handler/invalidate_table_cache.rs
@@ -18,7 +18,8 @@ use common_meta::error::Result as MetaResult;
use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
-use common_meta::instruction::{Instruction, InstructionReply, SimpleReply, TableIdent};
+use common_meta::ident::TableIdent;
+use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_meta::table_name::TableName;
use common_telemetry::{error, info};
use partition::manager::TableRouteCacheInvalidatorRef;
diff --git a/src/frontend/src/heartbeat/handler/tests.rs b/src/frontend/src/heartbeat/handler/tests.rs
index 0e6d03782438..c066ad601b59 100644
--- a/src/frontend/src/heartbeat/handler/tests.rs
+++ b/src/frontend/src/heartbeat/handler/tests.rs
@@ -23,7 +23,8 @@ use common_meta::heartbeat::handler::{
HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutor,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta};
-use common_meta::instruction::{Instruction, InstructionReply, SimpleReply, TableIdent};
+use common_meta::ident::TableIdent;
+use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_meta::table_name::TableName;
use partition::manager::TableRouteCacheInvalidator;
use tokio::sync::mpsc;
diff --git a/src/meta-client/Cargo.toml b/src/meta-client/Cargo.toml
index 3785db611eef..0945cadd3741 100644
--- a/src/meta-client/Cargo.toml
+++ b/src/meta-client/Cargo.toml
@@ -12,7 +12,7 @@ common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
common-telemetry = { path = "../common/telemetry" }
common-meta = { path = "../common/meta" }
-etcd-client = "0.10"
+etcd-client = "0.11"
rand.workspace = true
serde.workspace = true
serde_json.workspace = true
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 77679b943c96..539a3140230c 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -24,7 +24,7 @@ common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
dashmap = "5.4"
derive_builder = "0.12"
-etcd-client = "0.10"
+etcd-client = "0.11"
futures.workspace = true
h2 = "0.3"
http-body = "0.4"
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index 7b1205dfb9f0..658f45d3a5be 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -130,6 +130,7 @@ impl Pushers {
.push(HeartbeatResponse {
header: Some(pusher.header()),
mailbox_message: Some(mailbox_message),
+ ..Default::default()
})
.await
}
@@ -151,6 +152,7 @@ impl Pushers {
.push(HeartbeatResponse {
header: Some(pusher.header()),
mailbox_message: Some(mailbox_message),
+ ..Default::default()
})
.await?;
}
@@ -232,6 +234,7 @@ impl HeartbeatHandlerGroup {
let res = HeartbeatResponse {
header,
mailbox_message: acc.into_mailbox_message(),
+ ..Default::default()
};
Ok(res)
}
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index 68f772959007..953efaf6af96 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -19,7 +19,7 @@ use std::sync::Arc;
use api::v1::meta::{HeartbeatRequest, Role};
use async_trait::async_trait;
use common_catalog::consts::MITO_ENGINE;
-use common_meta::instruction::TableIdent;
+use common_meta::ident::TableIdent;
use common_meta::RegionIdent;
use table::engine::table_id;
diff --git a/src/meta-srv/src/handler/failure_handler/runner.rs b/src/meta-srv/src/handler/failure_handler/runner.rs
index b9fd28ef937b..c292c118b9bd 100644
--- a/src/meta-srv/src/handler/failure_handler/runner.rs
+++ b/src/meta-srv/src/handler/failure_handler/runner.rs
@@ -246,7 +246,7 @@ impl FailureDetectorContainer {
#[cfg(test)]
mod tests {
use common_catalog::consts::MITO_ENGINE;
- use common_meta::instruction::TableIdent;
+ use common_meta::ident::TableIdent;
use rand::Rng;
use super::*;
diff --git a/src/meta-srv/src/handler/response_header_handler.rs b/src/meta-srv/src/handler/response_header_handler.rs
index 305e40a5e2d8..6a0934b220a0 100644
--- a/src/meta-srv/src/handler/response_header_handler.rs
+++ b/src/meta-srv/src/handler/response_header_handler.rs
@@ -88,6 +88,7 @@ mod tests {
let res = HeartbeatResponse {
header,
mailbox_message: acc.into_mailbox_message(),
+ ..Default::default()
};
assert_eq!(1, res.header.unwrap().cluster_id);
}
diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs
index a409dbc13674..7424dc613916 100644
--- a/src/meta-srv/src/procedure/region_failover.rs
+++ b/src/meta-srv/src/procedure/region_failover.rs
@@ -343,7 +343,8 @@ mod tests {
use api::v1::meta::{HeartbeatResponse, MailboxMessage, Peer, RequestHeader};
use catalog::helper::TableGlobalKey;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
- use common_meta::instruction::{Instruction, InstructionReply, SimpleReply, TableIdent};
+ use common_meta::ident::TableIdent;
+ use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
use common_meta::DatanodeId;
use common_procedure::BoxedProcedure;
use rand::prelude::SliceRandom;
diff --git a/src/meta-srv/src/procedure/region_failover/failover_start.rs b/src/meta-srv/src/procedure/region_failover/failover_start.rs
index 2956fd026ecf..27ec574f4bfa 100644
--- a/src/meta-srv/src/procedure/region_failover/failover_start.rs
+++ b/src/meta-srv/src/procedure/region_failover/failover_start.rs
@@ -14,7 +14,7 @@
use async_trait::async_trait;
use common_error::prelude::{ErrorExt, StatusCode};
-use common_meta::instruction::TableIdent;
+use common_meta::ident::TableIdent;
use common_meta::peer::Peer;
use common_meta::RegionIdent;
use common_telemetry::info;
diff --git a/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs b/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs
index 52739a0ab89a..e43ec05cb106 100644
--- a/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs
+++ b/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs
@@ -14,7 +14,8 @@
use api::v1::meta::MailboxMessage;
use async_trait::async_trait;
-use common_meta::instruction::{Instruction, TableIdent};
+use common_meta::ident::TableIdent;
+use common_meta::instruction::Instruction;
use common_meta::RegionIdent;
use common_telemetry::info;
use serde::{Deserialize, Serialize};
diff --git a/src/table/src/test_util/empty_table.rs b/src/table/src/test_util/empty_table.rs
index c2388dc2922e..679ace68876b 100644
--- a/src/table/src/test_util/empty_table.rs
+++ b/src/table/src/test_util/empty_table.rs
@@ -36,6 +36,7 @@ impl EmptyTable {
.primary_key_indices(req.primary_key_indices)
.next_column_id(0)
.options(req.table_options)
+ .region_numbers(req.region_numbers)
.build();
let table_info = TableInfoBuilder::default()
.catalog_name(req.catalog_name)
diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs
index ecb888f8d09a..f2e942ce8d9b 100644
--- a/src/table/src/test_util/memtable.rs
+++ b/src/table/src/test_util/memtable.rs
@@ -77,7 +77,7 @@ impl MemTable {
.schema(schema)
.primary_key_indices(vec![])
.value_indices(vec![])
- .engine("mock".to_string())
+ .engine("mito".to_string())
.next_column_id(0)
.engine_options(Default::default())
.options(Default::default())
diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index 869faebe4666..053d5e355b15 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -20,7 +20,7 @@ use api::v1::meta::Peer;
use catalog::helper::TableGlobalKey;
use catalog::remote::{CachedMetaKvBackend, Kv};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
-use common_meta::instruction::TableIdent;
+use common_meta::ident::TableIdent;
use common_meta::rpc::router::TableRoute;
use common_meta::table_name::TableName;
use common_meta::RegionIdent;
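
A hedged sketch of the guard both heartbeat handlers above apply before touching the keeper: only a result of Ok(true), meaning a region was actually opened or closed, registers or deregisters it. RegionAliveKeepers is stubbed with synchronous printing methods here; the real type in catalog::remote::region_alive_keeper is async and works on a RegionIdent rather than a plain string.

```rust
// Stand-in for catalog::remote::region_alive_keeper::RegionAliveKeepers,
// reduced to synchronous methods for this sketch.
struct RegionAliveKeepers;

impl RegionAliveKeepers {
    fn register_region(&self, region: &str) {
        println!("start countdown for {region}");
    }
    fn deregister_region(&self, region: &str) {
        println!("stop countdown for {region}");
    }
}

fn on_open_result(keepers: &RegionAliveKeepers, region: &str, result: Result<bool, String>) {
    // `Ok(false)` (nothing was opened) and `Err(_)` both skip registration,
    // mirroring the `matches!(result, Ok(true))` check in OpenRegionHandler.
    if matches!(result, Ok(true)) {
        keepers.register_region(region);
    }
}

fn on_close_result(keepers: &RegionAliveKeepers, region: &str, result: Result<bool, String>) {
    // Same guard on the close path, mirroring CloseRegionHandler.
    if matches!(result, Ok(true)) {
        keepers.deregister_region(region);
    }
}

fn main() {
    let keepers = RegionAliveKeepers;
    on_open_result(&keepers, "region-0", Ok(true)); // registered
    on_open_result(&keepers, "region-1", Ok(false)); // skipped
    on_open_result(&keepers, "region-2", Err("io error".to_string())); // skipped
    on_close_result(&keepers, "region-0", Ok(true)); // deregistered
}
```
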
|
feat
|
region alive keeper in Datanode (#1780)
|
c52bc613e0039e1f0ae9a91af46f28767662b4f6
|
2024-03-06 13:54:55
|
Weny Xu
|
chore: add bin opt to build cmd (#3440)
| false
|
diff --git a/Makefile b/Makefile
index 61452f0f8746..53aad6d68daf 100644
--- a/Makefile
+++ b/Makefile
@@ -3,6 +3,7 @@ CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
TARGET ?=
+BUILD_BIN ?= greptime
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
@@ -45,6 +46,10 @@ ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif
+ifneq ($(strip $(BUILD_BIN)),)
+ CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
+endif
+
ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif
|
chore
|
add bin opt to build cmd (#3440)
|
b8bd8456f0ea05f8214a21ce7880c4e9fcf5e2d2
|
2024-07-15 09:04:19
|
Ning Sun
|
fix: remove path label for cache store (#4336)
| false
|
diff --git a/src/common/datasource/src/object_store/fs.rs b/src/common/datasource/src/object_store/fs.rs
index 6d342f8eb30b..16e30b0044c0 100644
--- a/src/common/datasource/src/object_store/fs.rs
+++ b/src/common/datasource/src/object_store/fs.rs
@@ -31,7 +31,7 @@ pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
.expect("input error level must be valid"),
)
.layer(object_store::layers::TracingLayer)
- .layer(object_store::layers::PrometheusMetricsLayer)
+ .layer(object_store::layers::PrometheusMetricsLayer::new(true))
.finish();
Ok(object_store)
}
diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs
index 2b6ac7c2ee73..6efc6474c45d 100644
--- a/src/common/datasource/src/object_store/s3.rs
+++ b/src/common/datasource/src/object_store/s3.rs
@@ -94,7 +94,7 @@ pub fn build_s3_backend(
.expect("input error level must be valid"),
)
.layer(object_store::layers::TracingLayer)
- .layer(object_store::layers::PrometheusMetricsLayer)
+ .layer(object_store::layers::PrometheusMetricsLayer::new(true))
.finish())
}
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index 6e6e5bea6813..9946ec44839a 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -60,7 +60,7 @@ pub(crate) async fn new_object_store(
object_store
};
- let store = with_instrument_layers(object_store);
+ let store = with_instrument_layers(object_store, true);
Ok(store)
}
diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs
index 47f5af9241e6..a72527fb3351 100644
--- a/src/mito2/src/access_layer.rs
+++ b/src/mito2/src/access_layer.rs
@@ -208,18 +208,15 @@ pub(crate) struct SstWriteRequest {
pub(crate) fulltext_index_config: FulltextIndexConfig,
}
-/// Creates a fs object store with atomic write dir.
-pub(crate) async fn new_fs_object_store(root: &str) -> Result<ObjectStore> {
+pub(crate) async fn new_fs_cache_store(root: &str) -> Result<ObjectStore> {
let atomic_write_dir = join_dir(root, ".tmp/");
clean_dir(&atomic_write_dir).await?;
let mut builder = Fs::default();
builder.root(root).atomic_write_dir(&atomic_write_dir);
- let object_store = ObjectStore::new(builder).context(OpenDalSnafu)?.finish();
+ let store = ObjectStore::new(builder).context(OpenDalSnafu)?.finish();
- // Add layers.
- let object_store = with_instrument_layers(object_store);
- Ok(object_store)
+ Ok(with_instrument_layers(store, false))
}
/// Clean the directory.
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs
index ff544662f905..a95bcff15f45 100644
--- a/src/mito2/src/cache/write_cache.rs
+++ b/src/mito2/src/cache/write_cache.rs
@@ -24,7 +24,7 @@ use object_store::manager::ObjectStoreManagerRef;
use object_store::ObjectStore;
use snafu::ResultExt;
-use crate::access_layer::{new_fs_object_store, SstWriteRequest};
+use crate::access_layer::{new_fs_cache_store, SstWriteRequest};
use crate::cache::file_cache::{FileCache, FileCacheRef, FileType, IndexKey, IndexValue};
use crate::error::{self, Result};
use crate::metrics::{FLUSH_ELAPSED, UPLOAD_BYTES_TOTAL};
@@ -86,7 +86,7 @@ impl WriteCache {
) -> Result<Self> {
info!("Init write cache on {cache_dir}, capacity: {cache_capacity}");
- let local_store = new_fs_object_store(cache_dir).await?;
+ let local_store = new_fs_cache_store(cache_dir).await?;
Self::new(
local_store,
object_store_manager,
diff --git a/src/mito2/src/sst/index/intermediate.rs b/src/mito2/src/sst/index/intermediate.rs
index 02095eda348b..1568261e206f 100644
--- a/src/mito2/src/sst/index/intermediate.rs
+++ b/src/mito2/src/sst/index/intermediate.rs
@@ -19,7 +19,7 @@ use object_store::util::{self, normalize_dir};
use store_api::storage::{ColumnId, RegionId};
use uuid::Uuid;
-use crate::access_layer::new_fs_object_store;
+use crate::access_layer::new_fs_cache_store;
use crate::error::Result;
use crate::sst::file::FileId;
use crate::sst::index::store::InstrumentedStore;
@@ -37,7 +37,7 @@ impl IntermediateManager {
/// Create a new `IntermediateManager` with the given root path.
/// It will clean up all garbage intermediate files from previous runs.
pub async fn init_fs(aux_path: impl AsRef<str>) -> Result<Self> {
- let store = new_fs_object_store(&normalize_dir(aux_path.as_ref())).await?;
+ let store = new_fs_cache_store(&normalize_dir(aux_path.as_ref())).await?;
let store = InstrumentedStore::new(store);
// Remove all garbage intermediate files from previous runs.
diff --git a/src/object-store/src/layers/prometheus.rs b/src/object-store/src/layers/prometheus.rs
index 080ace52bff6..5a2d0b603261 100644
--- a/src/object-store/src/layers/prometheus.rs
+++ b/src/object-store/src/layers/prometheus.rs
@@ -84,7 +84,15 @@ fn increment_errors_total(op: Operation, kind: ErrorKind) {
///
/// The metric buckets for these histograms are automatically generated based on the `exponential_buckets(0.01, 2.0, 16)` configuration.
#[derive(Default, Debug, Clone)]
-pub struct PrometheusMetricsLayer;
+pub struct PrometheusMetricsLayer {
+ pub path_label: bool,
+}
+
+impl PrometheusMetricsLayer {
+ pub fn new(path_label: bool) -> Self {
+ Self { path_label }
+ }
+}
impl<A: Access> Layer<A> for PrometheusMetricsLayer {
type LayeredAccess = PrometheusAccess<A>;
@@ -96,6 +104,7 @@ impl<A: Access> Layer<A> for PrometheusMetricsLayer {
PrometheusAccess {
inner,
scheme: scheme.to_string(),
+ path_label: self.path_label,
}
}
}
@@ -104,6 +113,17 @@ impl<A: Access> Layer<A> for PrometheusMetricsLayer {
pub struct PrometheusAccess<A: Access> {
inner: A,
scheme: String,
+ path_label: bool,
+}
+
+impl<A: Access> PrometheusAccess<A> {
+ fn get_path_label<'a>(&self, path: &'a str) -> &'a str {
+ if self.path_label {
+ extract_parent_path(path)
+ } else {
+ ""
+ }
+ }
}
impl<A: Access> Debug for PrometheusAccess<A> {
@@ -128,7 +148,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn create_dir(&self, path: &str, args: OpCreateDir) -> Result<RpCreateDir> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::CreateDir.into_static(), path_label])
.inc();
@@ -146,7 +166,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::Read.into_static(), path_label])
.inc();
@@ -176,7 +196,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::Write.into_static(), path_label])
.inc();
@@ -206,7 +226,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::Stat.into_static(), path_label])
.inc();
@@ -223,7 +243,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::Delete.into_static(), path_label])
.inc();
@@ -241,7 +261,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::List.into_static(), path_label])
.inc();
@@ -277,7 +297,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
async fn presign(&self, path: &str, args: OpPresign) -> Result<RpPresign> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[&self.scheme, Operation::Presign.into_static(), path_label])
.inc();
@@ -295,7 +315,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result<RpCreateDir> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[
&self.scheme,
@@ -322,7 +342,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[
&self.scheme,
@@ -363,7 +383,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[
&self.scheme,
@@ -404,7 +424,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
fn blocking_stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[
&self.scheme,
@@ -429,7 +449,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
fn blocking_delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[
&self.scheme,
@@ -455,7 +475,7 @@ impl<A: Access> LayeredAccess for PrometheusAccess<A> {
}
fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> {
- let path_label = extract_parent_path(path);
+ let path_label = self.get_path_label(path);
REQUESTS_TOTAL
.with_label_values(&[
&self.scheme,
diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs
index 376e1941c589..c8f7ac893fa4 100644
--- a/src/object-store/src/util.rs
+++ b/src/object-store/src/util.rs
@@ -138,7 +138,7 @@ pub(crate) fn extract_parent_path(path: &str) -> &str {
}
/// Attaches instrument layers to the object store.
-pub fn with_instrument_layers(object_store: ObjectStore) -> ObjectStore {
+pub fn with_instrument_layers(object_store: ObjectStore, path_label: bool) -> ObjectStore {
object_store
.layer(
LoggingLayer::default()
@@ -148,7 +148,7 @@ pub fn with_instrument_layers(object_store: ObjectStore) -> ObjectStore {
.expect("input error level must be valid"),
)
.layer(TracingLayer)
- .layer(PrometheusMetricsLayer)
+ .layer(PrometheusMetricsLayer::new(path_label))
}
#[cfg(test)]
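
A small sketch of the path_label switch this change threads through PrometheusMetricsLayer: when the flag is off (as it now is for the local cache store), the path label collapses to an empty string so cache I/O does not inflate metric cardinality. The extract_parent_path below is a simplified stand-in for the helper in src/object-store/src/util.rs and may differ from the real implementation; the sample object path is invented.

```rust
// Simplified stand-in for object-store's extract_parent_path; the real helper
// may behave differently (e.g. limit depth).
fn extract_parent_path(path: &str) -> &str {
    match path.rfind('/') {
        Some(idx) => &path[..=idx],
        None => "",
    }
}

// Mirrors the `path_label` field added to PrometheusAccess above.
struct MetricsPathLabel {
    path_label: bool,
}

impl MetricsPathLabel {
    fn get_path_label<'a>(&self, path: &'a str) -> &'a str {
        if self.path_label {
            extract_parent_path(path)
        } else {
            // Cache stores pass `false`, so every request shares one empty label.
            ""
        }
    }
}

fn main() {
    let object_store_metrics = MetricsPathLabel { path_label: true };
    let cache_store_metrics = MetricsPathLabel { path_label: false };

    // Hypothetical object path for illustration only.
    let path = "data/greptime/public/1024/1024_0000000000/file.parquet";
    assert_eq!(
        object_store_metrics.get_path_label(path),
        "data/greptime/public/1024/1024_0000000000/"
    );
    assert_eq!(cache_store_metrics.get_path_label(path), "");
}
```
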
|
fix
|
remove path label for cache store (#4336)
|
8ce8a8f3c7681872c997cb5f6c104f6805da9ec7
|
2023-12-26 15:26:49
|
Ruihang Xia
|
fix: revert unfinished route table change (#3008)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 4892674eb661..fdbb53e24d63 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -136,9 +136,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "0.6.5"
+version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6"
+checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -156,37 +156,37 @@ checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
-version = "0.2.3"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
+checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
-version = "1.0.2"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
+checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.48.0",
]
[[package]]
name = "anstyle-wincon"
-version = "3.0.2"
+version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
+checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
dependencies = [
"anstyle",
- "windows-sys 0.52.0",
+ "windows-sys 0.48.0",
]
[[package]]
name = "anyhow"
-version = "1.0.76"
+version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355"
+checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
[[package]]
name = "anymap"
@@ -206,7 +206,7 @@ dependencies = [
"datatypes",
"greptime-proto",
"paste",
- "prost 0.12.3",
+ "prost 0.12.2",
"snafu",
"tonic 0.10.2",
"tonic-build 0.9.2",
@@ -229,9 +229,9 @@ checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
[[package]]
name = "aquamarine"
-version = "0.3.3"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760"
+checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1"
dependencies = [
"include_dir",
"itertools 0.10.5",
@@ -316,7 +316,7 @@ dependencies = [
"chrono",
"chrono-tz 0.8.4",
"half 2.3.1",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"num",
]
@@ -395,7 +395,7 @@ dependencies = [
"bytes",
"futures",
"paste",
- "prost 0.12.3",
+ "prost 0.12.2",
"tokio",
"tonic 0.10.2",
]
@@ -461,7 +461,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"half 2.3.1",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
]
[[package]]
@@ -597,7 +597,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -619,18 +619,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
name = "async-trait"
-version = "0.1.75"
+version = "0.1.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98"
+checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -770,7 +770,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -851,7 +851,7 @@ version = "0.5.0"
dependencies = [
"arrow",
"chrono",
- "clap 4.4.11",
+ "clap 4.4.8",
"client",
"futures-util",
"indicatif",
@@ -899,7 +899,7 @@ dependencies = [
"regex",
"rustc-hash",
"shlex",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -974,9 +974,9 @@ dependencies = [
[[package]]
name = "borsh"
-version = "1.3.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028"
+checksum = "bf617fabf5cdbdc92f774bfe5062d870f228b80056d41180797abf48bed4056e"
dependencies = [
"borsh-derive",
"cfg_aliases",
@@ -984,15 +984,15 @@ dependencies = [
[[package]]
name = "borsh-derive"
-version = "1.3.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0"
+checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3"
dependencies = [
"once_cell",
"proc-macro-crate 2.0.0",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
"syn_derive",
]
@@ -1401,9 +1401,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.4.11"
+version = "4.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2"
+checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64"
dependencies = [
"clap_builder",
"clap_derive",
@@ -1411,9 +1411,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.4.11"
+version = "4.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb"
+checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc"
dependencies = [
"anstream",
"anstyle",
@@ -1430,7 +1430,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -1478,7 +1478,7 @@ dependencies = [
"moka",
"parking_lot 0.12.1",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"rand",
"session",
"snafu",
@@ -1520,7 +1520,7 @@ dependencies = [
"auth",
"catalog",
"chrono",
- "clap 4.4.11",
+ "clap 4.4.8",
"client",
"common-base",
"common-catalog",
@@ -1551,7 +1551,7 @@ dependencies = [
"partition",
"plugins",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"query",
"rand",
"regex",
@@ -1751,7 +1751,7 @@ dependencies = [
"flatbuffers",
"futures",
"lazy_static",
- "prost 0.12.3",
+ "prost 0.12.2",
"rand",
"snafu",
"tokio",
@@ -1790,7 +1790,7 @@ dependencies = [
"snafu",
"static_assertions",
"syn 1.0.109",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -1835,7 +1835,7 @@ dependencies = [
"hyper",
"lazy_static",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"rand",
"regex",
"rskafka",
@@ -2002,18 +2002,18 @@ dependencies = [
[[package]]
name = "concurrent-queue"
-version = "2.4.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363"
+checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "config"
-version = "0.13.4"
+version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca"
+checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7"
dependencies = [
"async-trait",
"json5",
@@ -2085,9 +2085,9 @@ checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3"
[[package]]
name = "const-oid"
-version = "0.9.6"
+version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f"
[[package]]
name = "const-random"
@@ -2117,9 +2117,9 @@ checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
[[package]]
name = "core-foundation"
-version = "0.9.4"
+version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
dependencies = [
"core-foundation-sys",
"libc",
@@ -2127,9 +2127,9 @@ dependencies = [
[[package]]
name = "core-foundation-sys"
-version = "0.8.6"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
+checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
[[package]]
name = "cpp_demangle"
@@ -2377,7 +2377,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.10.0",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -2399,7 +2399,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5"
dependencies = [
"darling_core 0.20.3",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -2409,7 +2409,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
"cfg-if 1.0.0",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"lock_api",
"once_cell",
"parking_lot_core 0.9.9",
@@ -2441,7 +2441,7 @@ dependencies = [
"futures",
"glob",
"half 2.3.1",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"indexmap 2.1.0",
"itertools 0.11.0",
"log",
@@ -2491,7 +2491,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"futures",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"log",
"object_store",
"parking_lot 0.12.1",
@@ -2525,7 +2525,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"datafusion-physical-expr",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"itertools 0.11.0",
"log",
"regex-syntax 0.8.2",
@@ -2548,7 +2548,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"half 2.3.1",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"hex",
"indexmap 2.1.0",
"itertools 0.11.0",
@@ -2582,7 +2582,7 @@ dependencies = [
"datafusion-physical-expr",
"futures",
"half 2.3.1",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"indexmap 2.1.0",
"itertools 0.11.0",
"log",
@@ -2617,8 +2617,8 @@ dependencies = [
"datafusion",
"itertools 0.11.0",
"object_store",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.2",
+ "prost-types 0.12.2",
"substrait 0.17.1",
"tokio",
]
@@ -2674,7 +2674,7 @@ dependencies = [
"object-store",
"pin-project",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"query",
"reqwest",
"secrecy",
@@ -2748,16 +2748,16 @@ version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
dependencies = [
- "const-oid 0.9.6",
+ "const-oid 0.9.5",
"pem-rfc7468 0.7.0",
"zeroize",
]
[[package]]
name = "deranged"
-version = "0.3.10"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
+checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
dependencies = [
"powerfmt",
"serde",
@@ -2782,7 +2782,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -2866,7 +2866,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
- "const-oid 0.9.6",
+ "const-oid 0.9.5",
"crypto-common",
"subtle",
]
@@ -3022,7 +3022,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -3034,7 +3034,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -3045,21 +3045,21 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "erased-serde"
-version = "0.4.1"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4adbf0983fe06bd3a5c19c8477a637c2389feb0994eca7a59e3b961054aa7c0a"
+checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c"
dependencies = [
"serde",
]
[[package]]
name = "errno"
-version = "0.3.8"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
+checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8"
dependencies = [
"libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -3088,7 +3088,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5231ad671c74ee5dc02753a0a9c855fe6e90de2a07acb2582f8a702470e04d1"
dependencies = [
"http",
- "prost 0.12.3",
+ "prost 0.12.2",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -3154,7 +3154,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5"
dependencies = [
"cfg-if 1.0.0",
- "rustix 0.38.28",
+ "rustix 0.38.25",
"windows-sys 0.48.0",
]
@@ -3189,14 +3189,14 @@ dependencies = [
[[package]]
name = "filetime"
-version = "0.2.23"
+version = "0.2.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd"
+checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0"
dependencies = [
"cfg-if 1.0.0",
"libc",
- "redox_syscall 0.4.1",
- "windows-sys 0.52.0",
+ "redox_syscall 0.3.5",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -3341,7 +3341,7 @@ dependencies = [
"operator",
"partition",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"query",
"raft-engine",
"regex",
@@ -3389,7 +3389,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e"
dependencies = [
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -3401,7 +3401,7 @@ dependencies = [
"frunk_core",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -3413,7 +3413,7 @@ dependencies = [
"frunk_core",
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -3505,7 +3505,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -3600,9 +3600,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.28.1"
+version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
+checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
[[package]]
name = "git2"
@@ -3628,7 +3628,7 @@ name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a31ea166fc015ea7ff111ac94e26c3a5d64364d2#a31ea166fc015ea7ff111ac94e26c3a5d64364d2"
dependencies = [
- "prost 0.12.3",
+ "prost 0.12.2",
"serde",
"serde_json",
"strum 0.25.0",
@@ -3693,9 +3693,9 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.14.3"
+version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
+checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"
dependencies = [
"ahash 0.8.6",
"allocator-api2",
@@ -3707,7 +3707,7 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
dependencies = [
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
]
[[package]]
@@ -3785,9 +3785,9 @@ checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df"
[[package]]
name = "hkdf"
-version = "0.12.4"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7"
+checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
dependencies = [
"hmac",
]
@@ -3803,11 +3803,11 @@ dependencies = [
[[package]]
name = "home"
-version = "0.5.9"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
+checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -3834,9 +3834,9 @@ dependencies = [
[[package]]
name = "http-body"
-version = "0.4.6"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
+checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
dependencies = [
"bytes",
"http",
@@ -3895,9 +3895,9 @@ dependencies = [
[[package]]
name = "hyper"
-version = "0.14.28"
+version = "0.14.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80"
+checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"
dependencies = [
"bytes",
"futures-channel",
@@ -3910,7 +3910,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
- "socket2 0.5.5",
+ "socket2 0.4.10",
"tokio",
"tower-service",
"tracing",
@@ -3926,7 +3926,7 @@ dependencies = [
"futures-util",
"http",
"hyper",
- "rustls 0.21.10",
+ "rustls 0.21.9",
"tokio",
"tokio-rustls 0.24.1",
]
@@ -4024,7 +4024,7 @@ dependencies = [
"greptime-proto",
"mockall",
"pin-project",
- "prost 0.12.3",
+ "prost 0.12.2",
"rand",
"regex",
"regex-automata 0.1.10",
@@ -4051,7 +4051,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
dependencies = [
"equivalent",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"serde",
]
@@ -4076,9 +4076,9 @@ checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306"
[[package]]
name = "inferno"
-version = "0.11.19"
+version = "0.11.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9"
+checksum = "abfb2e51b23c338595ae0b6bdaaa7a4a8b860b8d788a4331cb07b50fe5dea71b"
dependencies = [
"ahash 0.8.6",
"indexmap 2.1.0",
@@ -4126,9 +4126,9 @@ checksum = "924df4f0e24e2e7f9cdd90babb0b96f93b20f3ecfa949ea9e6613756b8c8e1bf"
[[package]]
name = "inventory"
-version = "0.3.14"
+version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8573b2b1fb643a372c73b23f4da5f888677feef3305146d68a539250a9bccc7"
+checksum = "0508c56cfe9bfd5dfeb0c22ab9a6abfda2f27bdca422132e494266351ed8d83c"
[[package]]
name = "io-lifetimes"
@@ -4176,7 +4176,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.3",
- "rustix 0.38.28",
+ "rustix 0.38.25",
"windows-sys 0.48.0",
]
@@ -4198,20 +4198,11 @@ dependencies = [
"either",
]
-[[package]]
-name = "itertools"
-version = "0.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0"
-dependencies = [
- "either",
-]
-
[[package]]
name = "itoa"
-version = "1.0.10"
+version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "jobserver"
@@ -4224,9 +4215,9 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.66"
+version = "0.3.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
+checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8"
dependencies = [
"wasm-bindgen",
]
@@ -4244,14 +4235,13 @@ dependencies = [
[[package]]
name = "jsonwebtoken"
-version = "9.2.0"
+version = "8.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4"
+checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
dependencies = [
"base64 0.21.5",
- "js-sys",
- "pem",
- "ring 0.17.7",
+ "pem 1.1.1",
+ "ring 0.16.20",
"serde",
"serde_json",
"simple_asn1",
@@ -4387,9 +4377,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.151"
+version = "0.2.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
+checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
[[package]]
name = "libgit2-sys"
@@ -4467,9 +4457,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "linux-raw-sys"
-version = "0.4.12"
+version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
+checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
[[package]]
name = "lock_api"
@@ -4578,7 +4568,7 @@ version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7"
dependencies = [
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
]
[[package]]
@@ -4633,9 +4623,9 @@ dependencies = [
[[package]]
name = "mach2"
-version = "0.4.2"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709"
+checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
dependencies = [
"libc",
]
@@ -4727,9 +4717,9 @@ dependencies = [
[[package]]
name = "memmap2"
-version = "0.9.3"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45fd3a57831bf88bc63f8cebc0cf956116276e97fef3966103e96416209f7c92"
+checksum = "43a5a03cefb0d953ec0be133036f14e109412fa594edc2f77227249db66cc3ed"
dependencies = [
"libc",
]
@@ -4828,7 +4818,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"rand",
"regex",
"serde",
@@ -4930,9 +4920,9 @@ dependencies = [
[[package]]
name = "mio"
-version = "0.8.10"
+version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
+checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
dependencies = [
"libc",
"log",
@@ -4984,7 +4974,7 @@ dependencies = [
"parquet",
"paste",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.2",
"regex",
"serde",
"serde_json",
@@ -5085,7 +5075,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
"termcolor",
"thiserror",
]
@@ -5108,11 +5098,11 @@ dependencies = [
"mio",
"mysql_common",
"once_cell",
- "pem",
+ "pem 3.0.2",
"percent-encoding",
"pin-project",
"rand",
- "rustls 0.21.10",
+ "rustls 0.21.9",
"rustls-pemfile 1.0.4",
"serde",
"serde_json",
@@ -5336,7 +5326,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -5484,9 +5474,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.19.0"
+version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "oorandom"
@@ -5617,7 +5607,7 @@ source = "git+https://github.com/waynexia/opentelemetry-rust.git?rev=33841b38dda
dependencies = [
"opentelemetry 0.21.0 (git+https://github.com/waynexia/opentelemetry-rust.git?rev=33841b38dda79b15f2024952be5f32533325ca02)",
"opentelemetry_sdk 0.20.0",
- "prost 0.12.3",
+ "prost 0.12.2",
"tonic 0.10.2",
]
@@ -5655,7 +5645,7 @@ dependencies = [
"glob",
"once_cell",
"opentelemetry 0.21.0 (git+https://github.com/waynexia/opentelemetry-rust.git?rev=33841b38dda79b15f2024952be5f32533325ca02)",
- "ordered-float 4.2.0",
+ "ordered-float 4.1.1",
"percent-encoding",
"rand",
"thiserror",
@@ -5675,7 +5665,7 @@ dependencies = [
"glob",
"once_cell",
"opentelemetry 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ordered-float 4.2.0",
+ "ordered-float 4.1.1",
"percent-encoding",
"rand",
"thiserror",
@@ -5785,9 +5775,9 @@ dependencies = [
[[package]]
name = "ordered-float"
-version = "4.2.0"
+version = "4.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e"
+checksum = "536900a8093134cf9ccf00a27deb3532421099e958d9dd431135d0c7543ca1e8"
dependencies = [
"num-traits",
]
@@ -5809,7 +5799,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4d6a8c22fc714f0c2373e6091bf6f5e9b37b1bc0b1184874b7e0a4e303d318f"
dependencies = [
"dlv-list 0.5.2",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
]
[[package]]
@@ -5935,7 +5925,7 @@ dependencies = [
"chrono",
"flate2",
"futures",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"lz4",
"num",
"num-bigint",
@@ -6005,9 +5995,18 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
[[package]]
name = "pem"
-version = "3.0.3"
+version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310"
+checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8"
+dependencies = [
+ "base64 0.13.1",
+]
+
+[[package]]
+name = "pem"
+version = "3.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923"
dependencies = [
"base64 0.21.5",
"serde",
@@ -6068,7 +6067,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -6110,7 +6109,7 @@ dependencies = [
"md5",
"postgres-types",
"rand",
- "ring 0.17.7",
+ "ring 0.17.5",
"stringprep",
"thiserror",
"time",
@@ -6185,7 +6184,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -6219,7 +6218,7 @@ checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
dependencies = [
"der 0.7.8",
"pkcs8 0.10.2",
- "spki 0.7.3",
+ "spki 0.7.2",
]
[[package]]
@@ -6240,14 +6239,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
dependencies = [
"der 0.7.8",
- "spki 0.7.3",
+ "spki 0.7.2",
]
[[package]]
name = "pkg-config"
-version = "0.3.28"
+version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
[[package]]
name = "plotters"
@@ -6302,9 +6301,9 @@ dependencies = [
[[package]]
name = "portable-atomic"
-version = "1.6.0"
+version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
+checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b"
[[package]]
name = "postgres-protocol"
@@ -6358,9 +6357,9 @@ dependencies = [
"nix 0.26.4",
"once_cell",
"parking_lot 0.12.1",
- "prost 0.12.3",
- "prost-build 0.12.3",
- "prost-derive 0.12.3",
+ "prost 0.12.2",
+ "prost-build 0.12.2",
+ "prost-derive 0.12.2",
"protobuf",
"sha2",
"smallvec",
@@ -6438,7 +6437,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d"
dependencies = [
"proc-macro2",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -6486,9 +6485,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.71"
+version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8"
+checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
dependencies = [
"unicode-ident",
]
@@ -6555,7 +6554,7 @@ dependencies = [
"lazy_static",
"prometheus",
"promql-parser",
- "prost 0.12.3",
+ "prost 0.12.2",
"query",
"session",
"snafu",
@@ -6588,12 +6587,12 @@ dependencies = [
[[package]]
name = "prost"
-version = "0.12.3"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
+checksum = "5a5a410fc7882af66deb8d01d01737353cf3ad6204c408177ba494291a626312"
dependencies = [
"bytes",
- "prost-derive 0.12.3",
+ "prost-derive 0.12.2",
]
[[package]]
@@ -6620,9 +6619,9 @@ dependencies = [
[[package]]
name = "prost-build"
-version = "0.12.3"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
+checksum = "1fa3d084c8704911bfefb2771be2f9b6c5c0da7343a71e0021ee3c665cada738"
dependencies = [
"bytes",
"heck",
@@ -6632,10 +6631,10 @@ dependencies = [
"once_cell",
"petgraph",
"prettyplease 0.2.15",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.2",
+ "prost-types 0.12.2",
"regex",
- "syn 2.0.43",
+ "syn 2.0.39",
"tempfile",
"which",
]
@@ -6655,15 +6654,15 @@ dependencies = [
[[package]]
name = "prost-derive"
-version = "0.12.3"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
+checksum = "065717a5dfaca4a83d2fe57db3487b311365200000551d7a364e715dbf4346bc"
dependencies = [
"anyhow",
"itertools 0.11.0",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -6677,11 +6676,11 @@ dependencies = [
[[package]]
name = "prost-types"
-version = "0.12.3"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
+checksum = "8339f32236f590281e2f6368276441394fcd1b2133b549cc895d0ae80f2f9a52"
dependencies = [
- "prost 0.12.3",
+ "prost 0.12.2",
]
[[package]]
@@ -6985,7 +6984,7 @@ dependencies = [
"crossbeam",
"fail",
"fs2",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.2",
"hex",
"if_chain",
"lazy_static",
@@ -7094,6 +7093,15 @@ dependencies = [
"bitflags 1.3.2",
]
+[[package]]
+name = "redox_syscall"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
+dependencies = [
+ "bitflags 1.3.2",
+]
+
[[package]]
name = "redox_syscall"
version = "0.4.1"
@@ -7186,16 +7194,15 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.14.6"
+version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dce87f66ba6c6acef277a729f989a0eca946cb9ce6a15bcc036bda0f72d4b9fd"
+checksum = "1ad14258ddd8ef6e564d57a94613e138cc9c21ef8a1fec547206d853213c7959"
dependencies = [
"anyhow",
"async-trait",
"base64 0.21.5",
"chrono",
"form_urlencoded",
- "getrandom",
"hex",
"hmac",
"home",
@@ -7207,7 +7214,7 @@ dependencies = [
"quick-xml 0.31.0",
"rand",
"reqwest",
- "rsa 0.9.6",
+ "rsa 0.9.4",
"rust-ini 0.20.0",
"serde",
"serde_json",
@@ -7218,9 +7225,9 @@ dependencies = [
[[package]]
name = "reqwest"
-version = "0.11.23"
+version = "0.11.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41"
+checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b"
dependencies = [
"base64 0.21.5",
"bytes",
@@ -7240,7 +7247,7 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
- "rustls 0.21.10",
+ "rustls 0.21.9",
"rustls-native-certs",
"rustls-pemfile 1.0.4",
"serde",
@@ -7327,7 +7334,7 @@ checksum = "853977598f084a492323fe2f7896b4100a86284ee8473612de60021ea341310f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -7347,9 +7354,9 @@ dependencies = [
[[package]]
name = "ring"
-version = "0.17.7"
+version = "0.17.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74"
+checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b"
dependencies = [
"cc",
"getrandom",
@@ -7361,13 +7368,12 @@ dependencies = [
[[package]]
name = "rkyv"
-version = "0.7.43"
+version = "0.7.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5"
+checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58"
dependencies = [
"bitvec",
"bytecheck",
- "bytes",
"hashbrown 0.12.3",
"ptr_meta",
"rend",
@@ -7379,9 +7385,9 @@ dependencies = [
[[package]]
name = "rkyv_derive"
-version = "0.7.43"
+version = "0.7.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033"
+checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d"
dependencies = [
"proc-macro2",
"quote",
@@ -7421,11 +7427,11 @@ dependencies = [
[[package]]
name = "rsa"
-version = "0.9.6"
+version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc"
+checksum = "6a3211b01eea83d80687da9eef70e39d65144a3894866a5153a2723e425a157f"
dependencies = [
- "const-oid 0.9.6",
+ "const-oid 0.9.5",
"digest",
"num-bigint-dig",
"num-integer",
@@ -7434,7 +7440,7 @@ dependencies = [
"pkcs8 0.10.2",
"rand_core",
"signature",
- "spki 0.7.3",
+ "spki 0.7.2",
"subtle",
"zeroize",
]
@@ -7521,7 +7527,7 @@ dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
- "syn 2.0.43",
+ "syn 2.0.39",
"walkdir",
]
@@ -7608,15 +7614,15 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.28"
+version = "0.38.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316"
+checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
- "linux-raw-sys 0.4.12",
- "windows-sys 0.52.0",
+ "linux-raw-sys 0.4.11",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -7633,12 +7639,12 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.21.10"
+version = "0.21.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
+checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9"
dependencies = [
"log",
- "ring 0.17.7",
+ "ring 0.17.5",
"rustls-webpki 0.101.7",
"sct",
]
@@ -7650,7 +7656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe6b63262c9fcac8659abfaa96cac103d28166d3ff3eaf8f412e19f3ae9e5a48"
dependencies = [
"log",
- "ring 0.17.7",
+ "ring 0.17.5",
"rustls-pki-types",
"rustls-webpki 0.102.0",
"subtle",
@@ -7700,7 +7706,7 @@ version = "0.101.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.5",
"untrusted 0.9.0",
]
@@ -7710,7 +7716,7 @@ version = "0.102.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.5",
"rustls-pki-types",
"untrusted 0.9.0",
]
@@ -8052,9 +8058,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.16"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
[[package]]
name = "safe-lock"
@@ -8237,7 +8243,7 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.5",
"untrusted 0.9.0",
]
@@ -8312,7 +8318,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -8355,14 +8361,14 @@ checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
name = "serde_spanned"
-version = "0.6.5"
+version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
+checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80"
dependencies = [
"serde",
]
@@ -8376,7 +8382,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -8417,14 +8423,14 @@ dependencies = [
"darling 0.20.3",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
name = "serde_yaml"
-version = "0.9.29"
+version = "0.9.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15e0ef66bf939a7c890a0bf6d5a733c70202225f9888a89ed5c62298b019129"
+checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c"
dependencies = [
"indexmap 2.1.0",
"itoa",
@@ -8494,7 +8500,7 @@ dependencies = [
"pprof",
"prometheus",
"promql-parser",
- "prost 0.12.3",
+ "prost 0.12.2",
"query",
"rand",
"regex",
@@ -8722,9 +8728,9 @@ dependencies = [
[[package]]
name = "snap"
-version = "1.1.1"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
+checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831"
[[package]]
name = "socket2"
@@ -8782,9 +8788,9 @@ dependencies = [
[[package]]
name = "spki"
-version = "0.7.3"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
+checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
dependencies = [
"base64ct",
"der 0.7.8",
@@ -8818,11 +8824,11 @@ dependencies = [
[[package]]
name = "sqlformat"
-version = "0.2.3"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c"
+checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85"
dependencies = [
- "itertools 0.12.0",
+ "itertools 0.11.0",
"nom",
"unicode_categories",
]
@@ -8847,7 +8853,7 @@ name = "sqlness-runner"
version = "0.5.0"
dependencies = [
"async-trait",
- "clap 4.4.11",
+ "clap 4.4.8",
"client",
"common-base",
"common-error",
@@ -9175,7 +9181,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -9207,7 +9213,7 @@ dependencies = [
"datatypes",
"futures",
"promql",
- "prost 0.12.3",
+ "prost 0.12.2",
"session",
"snafu",
"substrait 0.17.1",
@@ -9224,15 +9230,15 @@ dependencies = [
"git2",
"heck",
"prettyplease 0.2.15",
- "prost 0.12.3",
- "prost-build 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.2",
+ "prost-build 0.12.2",
+ "prost-types 0.12.2",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.43",
+ "syn 2.0.39",
"typify",
"walkdir",
]
@@ -9245,21 +9251,21 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "symbolic-common"
-version = "12.8.0"
+version = "12.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe"
+checksum = "39eac77836da383d35edbd9ff4585b4fc1109929ff641232f2e9a1aefdfc9e91"
dependencies = [
"debugid",
- "memmap2 0.9.3",
+ "memmap2 0.8.0",
"stable_deref_trait",
"uuid",
]
[[package]]
name = "symbolic-demangle"
-version = "12.8.0"
+version = "12.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76a99812da4020a67e76c4eb41f08c87364c14170495ff780f30dd519c221a68"
+checksum = "4ee1608a1d13061fb0e307a316de29f6c6e737b05459fe6bbf5dd8d7837c4fb7"
dependencies = [
"cpp_demangle",
"rustc-demangle",
@@ -9279,9 +9285,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.43"
+version = "2.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee659fb5f3d355364e1f3e5bc10fb82068efbf824a1e9d1c9504244a6469ad53"
+checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
dependencies = [
"proc-macro2",
"quote",
@@ -9306,7 +9312,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -9409,7 +9415,7 @@ dependencies = [
"cfg-if 1.0.0",
"fastrand 2.0.1",
"redox_syscall 0.4.1",
- "rustix 0.38.28",
+ "rustix 0.38.25",
"windows-sys 0.48.0",
]
@@ -9491,7 +9497,7 @@ dependencies = [
"operator",
"partition",
"paste",
- "prost 0.12.3",
+ "prost 0.12.2",
"query",
"rand",
"rstest",
@@ -9540,22 +9546,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
-version = "1.0.51"
+version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7"
+checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.51"
+version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df"
+checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -9622,9 +9628,9 @@ dependencies = [
[[package]]
name = "time"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e"
+checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
dependencies = [
"deranged",
"itoa",
@@ -9642,18 +9648,18 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
-version = "0.2.16"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f"
+checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20"
dependencies = [
"time-core",
]
[[package]]
name = "timsort"
-version = "0.1.3"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "639ce8ef6d2ba56be0383a94dd13b92138d58de44c62618303bb798fa92bdc00"
+checksum = "3cb4fa83bb73adf1c7219f4fe4bf3c0ac5635e4e51e070fad5df745a41bedfb8"
[[package]]
name = "tiny-keccak"
@@ -9691,9 +9697,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.35.1"
+version = "1.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
+checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9"
dependencies = [
"backtrace",
"bytes",
@@ -9727,7 +9733,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -9812,7 +9818,7 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
- "rustls 0.21.10",
+ "rustls 0.21.9",
"tokio",
]
@@ -9977,8 +9983,8 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
- "prost 0.12.3",
- "rustls 0.21.10",
+ "prost 0.12.2",
+ "rustls 0.21.9",
"rustls-pemfile 1.0.4",
"tokio",
"tokio-rustls 0.24.1",
@@ -10010,9 +10016,9 @@ checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
dependencies = [
"prettyplease 0.2.15",
"proc-macro2",
- "prost-build 0.12.3",
+ "prost-build 0.12.2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -10021,8 +10027,8 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fa37c513df1339d197f4ba21d28c918b9ef1ac1768265f11ecb6b7f1cba1b76"
dependencies = [
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.2",
+ "prost-types 0.12.2",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -10123,7 +10129,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -10208,15 +10214,15 @@ dependencies = [
[[package]]
name = "triomphe"
-version = "0.1.11"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3"
+checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f"
[[package]]
name = "try-lock"
-version = "0.2.5"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
+checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"
[[package]]
name = "try_from"
@@ -10246,9 +10252,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "typetag"
-version = "0.2.14"
+version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "196976efd4a62737b3a2b662cda76efb448d099b1049613d7a5d72743c611ce0"
+checksum = "80960fd143d4c96275c0e60b08f14b81fbb468e79bc0ef8fbda69fb0afafae43"
dependencies = [
"erased-serde",
"inventory",
@@ -10259,13 +10265,13 @@ dependencies = [
[[package]]
name = "typetag-impl"
-version = "0.2.14"
+version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2eea6765137e2414c44c7b1e07c73965a118a72c46148e1e168b3fc9d3ccf3aa"
+checksum = "bfc13d450dc4a695200da3074dacf43d449b968baee95e341920e47f61a3b40f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -10291,7 +10297,7 @@ dependencies = [
"regress",
"schemars",
"serde_json",
- "syn 2.0.43",
+ "syn 2.0.39",
"thiserror",
"unicode-ident",
]
@@ -10308,7 +10314,7 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.43",
+ "syn 2.0.39",
"typify-impl",
]
@@ -10469,9 +10475,9 @@ dependencies = [
[[package]]
name = "unicode-bidi"
-version = "0.3.14"
+version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416"
+checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
[[package]]
name = "unicode-casing"
@@ -10534,9 +10540,9 @@ checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
[[package]]
name = "unsafe-libyaml"
-version = "0.2.10"
+version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b"
+checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa"
[[package]]
name = "untrusted"
@@ -10594,7 +10600,7 @@ checksum = "f49e7f3f3db8040a100710a11932239fd30697115e2ba4107080d8252939845e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -10680,9 +10686,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.89"
+version = "0.2.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
+checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce"
dependencies = [
"cfg-if 1.0.0",
"wasm-bindgen-macro",
@@ -10690,24 +10696,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.89"
+version = "0.2.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
+checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.39"
+version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12"
+checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@@ -10717,9 +10723,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.89"
+version = "0.2.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
+checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -10727,22 +10733,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.89"
+version = "0.2.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
+checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.89"
+version = "0.2.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
+checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b"
[[package]]
name = "wasm-streams"
@@ -10759,9 +10765,9 @@ dependencies = [
[[package]]
name = "web-sys"
-version = "0.3.66"
+version = "0.3.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f"
+checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -10783,7 +10789,7 @@ version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.5",
"untrusted 0.9.0",
]
@@ -10811,7 +10817,7 @@ dependencies = [
"either",
"home",
"once_cell",
- "rustix 0.38.28",
+ "rustix 0.38.25",
]
[[package]]
@@ -10911,15 +10917,6 @@ dependencies = [
"windows-targets 0.48.5",
]
-[[package]]
-name = "windows-sys"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
-dependencies = [
- "windows-targets 0.52.0",
-]
-
[[package]]
name = "windows-targets"
version = "0.42.2"
@@ -10950,21 +10947,6 @@ dependencies = [
"windows_x86_64_msvc 0.48.5",
]
-[[package]]
-name = "windows-targets"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
-dependencies = [
- "windows_aarch64_gnullvm 0.52.0",
- "windows_aarch64_msvc 0.52.0",
- "windows_i686_gnu 0.52.0",
- "windows_i686_msvc 0.52.0",
- "windows_x86_64_gnu 0.52.0",
- "windows_x86_64_gnullvm 0.52.0",
- "windows_x86_64_msvc 0.52.0",
-]
-
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@@ -10977,12 +10959,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
-
[[package]]
name = "windows_aarch64_msvc"
version = "0.39.0"
@@ -11001,12 +10977,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
-
[[package]]
name = "windows_i686_gnu"
version = "0.39.0"
@@ -11025,12 +10995,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
-[[package]]
-name = "windows_i686_gnu"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
-
[[package]]
name = "windows_i686_msvc"
version = "0.39.0"
@@ -11049,12 +11013,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
-[[package]]
-name = "windows_i686_msvc"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
-
[[package]]
name = "windows_x86_64_gnu"
version = "0.39.0"
@@ -11073,12 +11031,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
-
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@@ -11091,12 +11043,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
-
[[package]]
name = "windows_x86_64_msvc"
version = "0.39.0"
@@ -11115,17 +11061,11 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
-
[[package]]
name = "winnow"
-version = "0.5.30"
+version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5"
+checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b"
dependencies = [
"memchr",
]
@@ -11169,10 +11109,10 @@ dependencies = [
"chrono",
"der 0.7.8",
"hex",
- "pem",
- "ring 0.17.7",
+ "pem 3.0.2",
+ "ring 0.17.5",
"signature",
- "spki 0.7.3",
+ "spki 0.7.2",
"thiserror",
"zeroize",
]
@@ -11203,22 +11143,22 @@ dependencies = [
[[package]]
name = "zerocopy"
-version = "0.7.32"
+version = "0.7.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
+checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.7.32"
+version = "0.7.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
+checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
@@ -11238,7 +11178,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.43",
+ "syn 2.0.39",
]
[[package]]
diff --git a/src/cmd/src/cli/bench/metadata.rs b/src/cmd/src/cli/bench/metadata.rs
index 6eedc18eac18..7b77fed49dbd 100644
--- a/src/cmd/src/cli/bench/metadata.rs
+++ b/src/cmd/src/cli/bench/metadata.rs
@@ -14,7 +14,6 @@
use std::time::Instant;
-use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::table_name::TableName;
@@ -54,11 +53,7 @@ impl TableMetadataBencher {
let start = Instant::now();
self.table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- region_wal_options,
- )
+ .create_table_metadata(table_info, region_routes, region_wal_options)
.await
.unwrap();
diff --git a/src/cmd/src/cli/upgrade.rs b/src/cmd/src/cli/upgrade.rs
index 6936b13fd7b4..e5615f4d8219 100644
--- a/src/cmd/src/cli/upgrade.rs
+++ b/src/cmd/src/cli/upgrade.rs
@@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
-use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
+use common_meta::key::{RegionDistribution, TableMetaKey};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
@@ -153,7 +153,7 @@ impl MigrateTableMetadata {
)
.unwrap();
- let new_table_value = NextTableRouteValue::physical(table_route.region_routes);
+ let new_table_value = NextTableRouteValue::new(table_route.region_routes);
let table_id = table_route.table.id as u32;
let new_key = TableRouteKey::new(table_id);
diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs
index bb5220724ab6..793df3f9c4d6 100644
--- a/src/common/meta/src/ddl.rs
+++ b/src/common/meta/src/ddl.rs
@@ -21,10 +21,10 @@ use store_api::storage::{RegionNumber, TableId};
use crate::cache_invalidator::CacheInvalidatorRef;
use crate::datanode_manager::DatanodeManagerRef;
use crate::error::Result;
-use crate::key::table_route::TableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{CreateTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
+use crate::rpc::router::RegionRoute;
pub mod alter_table;
pub mod create_table;
@@ -58,7 +58,7 @@ pub struct TableMetadata {
/// Table id.
pub table_id: TableId,
/// Route information for each region of the table.
- pub table_route: TableRouteValue,
+ pub region_routes: Vec<RegionRoute>,
/// The encoded wal options for regions of the table.
// If a region does not have an associated wal options, no key for the region would be found in the map.
pub region_wal_options: HashMap<RegionNumber, String>,
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index 092d4dd24263..5d3f0e447ce8 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -45,6 +45,7 @@ use crate::error::{
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
+use crate::key::table_route::TableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
@@ -182,23 +183,25 @@ impl AlterTableProcedure {
pub async fn submit_alter_region_requests(&mut self) -> Result<Status> {
let table_id = self.data.table_id();
+ let table_ref = self.data.table_ref();
- let table_route = self
+ let TableRouteValue { region_routes, .. } = self
.context
.table_metadata_manager
.table_route_manager()
.get(table_id)
.await?
- .context(TableRouteNotFoundSnafu { table_id })?
+ .with_context(|| TableRouteNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })?
.into_inner();
- let region_routes = table_route.region_routes();
- let leaders = find_leaders(region_routes);
+        let leaders = find_leaders(&region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
let requester = self.context.datanode_manager.datanode(&datanode).await;
- let regions = find_leader_regions(region_routes, &datanode);
+            let regions = find_leader_regions(&region_routes, &datanode);
for region in regions {
let region_id = RegionId::new(table_id, region);
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index c73844fc8337..35050643d3c2 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -18,8 +18,9 @@ use api::v1::region::region_request::Body as PbRegionRequest;
use api::v1::region::{
CreateRequest as PbCreateRegionRequest, RegionColumnDef, RegionRequest, RegionRequestHeader,
};
-use api::v1::{ColumnDef, SemanticType};
+use api::v1::{ColumnDef, CreateTableExpr, SemanticType};
use async_trait::async_trait;
+use common_catalog::consts::METRIC_ENGINE;
use common_config::WAL_OPTIONS_KEY;
use common_error::ext::BoxedError;
use common_procedure::error::{
@@ -39,9 +40,8 @@ use table::metadata::{RawTableInfo, TableId};
use crate::ddl::utils::{handle_operate_region_error, handle_retry_error, region_storage_path};
use crate::ddl::DdlContext;
-use crate::error::{self, Result, TableRouteNotFoundSnafu};
+use crate::error::{self, Result, TableInfoNotFoundSnafu};
use crate::key::table_name::TableNameKey;
-use crate::key::table_route::TableRouteValue;
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::CreateTableTask;
@@ -60,13 +60,13 @@ impl CreateTableProcedure {
pub fn new(
cluster_id: u64,
task: CreateTableTask,
- table_route: TableRouteValue,
+ region_routes: Vec<RegionRoute>,
region_wal_options: HashMap<RegionNumber, String>,
context: DdlContext,
) -> Self {
Self {
context,
- creator: TableCreator::new(cluster_id, task, table_route, region_wal_options),
+ creator: TableCreator::new(cluster_id, task, region_routes, region_wal_options),
}
}
@@ -78,12 +78,10 @@ impl CreateTableProcedure {
opening_regions: vec![],
};
- if let TableRouteValue::Physical(x) = &creator.data.table_route {
- creator.opening_regions = creator
- .register_opening_regions(&context, &x.region_routes)
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
- }
+ creator
+ .register_opening_regions(&context)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
Ok(CreateTableProcedure { context, creator })
}
@@ -96,6 +94,10 @@ impl CreateTableProcedure {
self.table_info().ident.table_id
}
+ pub fn region_routes(&self) -> &Vec<RegionRoute> {
+ &self.creator.data.region_routes
+ }
+
pub fn region_wal_options(&self) -> &HashMap<RegionNumber, String> {
&self.creator.data.region_wal_options
}
@@ -130,10 +132,7 @@ impl CreateTableProcedure {
Ok(Status::executing(true))
}
- pub fn new_region_request_builder(
- &self,
- physical_table_id: Option<TableId>,
- ) -> Result<CreateRequestBuilder> {
+ pub fn new_region_request_builder(&self) -> Result<CreateRequestBuilder> {
let create_table_expr = &self.creator.data.task.create_table;
let column_defs = create_table_expr
@@ -192,54 +191,16 @@ impl CreateTableProcedure {
options: create_table_expr.table_options.clone(),
};
- Ok(CreateRequestBuilder {
- template,
- physical_table_id,
- })
+ let builder = CreateRequestBuilder::new_template(self.context.clone(), template);
+ Ok(builder)
}
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
- match &self.creator.data.table_route {
- TableRouteValue::Physical(x) => {
- let region_routes = x.region_routes.clone();
- let request_builder = self.new_region_request_builder(None)?;
- self.create_regions(®ion_routes, request_builder).await
- }
- TableRouteValue::Logical(x) => {
- let physical_table_id = x.physical_table_id();
-
- let physical_table_route = self
- .context
- .table_metadata_manager
- .table_route_manager()
- .get(physical_table_id)
- .await?
- .context(TableRouteNotFoundSnafu {
- table_id: physical_table_id,
- })?;
- let region_routes = physical_table_route.region_routes();
-
- let request_builder = self.new_region_request_builder(Some(physical_table_id))?;
-
- self.create_regions(region_routes, request_builder).await
- }
- }
- }
-
- async fn create_regions(
- &mut self,
- region_routes: &[RegionRoute],
- request_builder: CreateRequestBuilder,
- ) -> Result<Status> {
// Registers opening regions
- let guards = self
- .creator
- .register_opening_regions(&self.context, region_routes)?;
- if !guards.is_empty() {
- self.creator.opening_regions = guards;
- }
+ self.creator.register_opening_regions(&self.context)?;
let create_table_data = &self.creator.data;
+ let region_routes = &create_table_data.region_routes;
let region_wal_options = &create_table_data.region_wal_options;
let create_table_expr = &create_table_data.task.create_table;
@@ -247,6 +208,8 @@ impl CreateTableProcedure {
let schema = &create_table_expr.schema_name;
let storage_path = region_storage_path(catalog, schema);
+ let mut request_builder = self.new_region_request_builder()?;
+
let leaders = find_leaders(region_routes);
let mut create_region_tasks = Vec::with_capacity(leaders.len());
@@ -258,7 +221,12 @@ impl CreateTableProcedure {
for region_number in regions {
let region_id = RegionId::new(self.table_id(), region_number);
let create_region_request = request_builder
- .build_one(region_id, storage_path.clone(), region_wal_options)
+ .build_one(
+ &self.creator.data.task.create_table,
+ region_id,
+ storage_path.clone(),
+ region_wal_options,
+ )
.await?;
requests.push(PbRegionRequest::Create(create_region_request));
@@ -302,13 +270,10 @@ impl CreateTableProcedure {
let manager = &self.context.table_metadata_manager;
let raw_table_info = self.table_info().clone();
+ let region_routes = self.region_routes().clone();
let region_wal_options = self.region_wal_options().clone();
manager
- .create_table_metadata(
- raw_table_info,
- self.creator.data.table_route.clone(),
- region_wal_options,
- )
+ .create_table_metadata(raw_table_info, region_routes, region_wal_options)
.await?;
info!("Created table metadata for table {table_id}");
@@ -364,7 +329,7 @@ impl TableCreator {
pub fn new(
cluster_id: u64,
task: CreateTableTask,
- table_route: TableRouteValue,
+ region_routes: Vec<RegionRoute>,
region_wal_options: HashMap<RegionNumber, String>,
) -> Self {
Self {
@@ -372,23 +337,21 @@ impl TableCreator {
state: CreateTableState::Prepare,
cluster_id,
task,
- table_route,
+ region_routes,
region_wal_options,
},
opening_regions: vec![],
}
}
- /// Registers and returns the guards of the opening region if they don't exist.
- fn register_opening_regions(
- &self,
- context: &DdlContext,
- region_routes: &[RegionRoute],
- ) -> Result<Vec<OperatingRegionGuard>> {
+    /// Registers opening regions if they don't exist.
+ pub fn register_opening_regions(&mut self, context: &DdlContext) -> Result<()> {
+ let region_routes = &self.data.region_routes;
+
let opening_regions = operating_leader_regions(region_routes);
if self.opening_regions.len() == opening_regions.len() {
- return Ok(vec![]);
+ return Ok(());
}
let mut opening_region_guards = Vec::with_capacity(opening_regions.len());
@@ -403,7 +366,9 @@ impl TableCreator {
})?;
opening_region_guards.push(guard);
}
- Ok(opening_region_guards)
+
+ self.opening_regions = opening_region_guards;
+ Ok(())
}
}
@@ -421,7 +386,7 @@ pub enum CreateTableState {
pub struct CreateTableData {
pub state: CreateTableState,
pub task: CreateTableTask,
- table_route: TableRouteValue,
+ pub region_routes: Vec<RegionRoute>,
pub region_wal_options: HashMap<RegionNumber, String>,
pub cluster_id: u64,
}
@@ -434,18 +399,28 @@ impl CreateTableData {
/// Builder for [PbCreateRegionRequest].
pub struct CreateRequestBuilder {
+ context: DdlContext,
template: PbCreateRegionRequest,
/// Optional. Only for metric engine.
physical_table_id: Option<TableId>,
}
impl CreateRequestBuilder {
+ fn new_template(context: DdlContext, template: PbCreateRegionRequest) -> Self {
+ Self {
+ context,
+ template,
+ physical_table_id: None,
+ }
+ }
+
pub fn template(&self) -> &PbCreateRegionRequest {
&self.template
}
async fn build_one(
- &self,
+ &mut self,
+ create_expr: &CreateTableExpr,
region_id: RegionId,
storage_path: String,
region_wal_options: &HashMap<RegionNumber, String>,
@@ -463,18 +438,49 @@ impl CreateRequestBuilder {
.insert(WAL_OPTIONS_KEY.to_string(), wal_options.clone())
});
- if let Some(physical_table_id) = self.physical_table_id {
- // Logical table has the same region numbers with physical table, and they have a one-to-one mapping.
- // For example, region 0 of logical table must resides with region 0 of physical table. So here we can
- // simply concat the physical table id and the logical region number to get the physical region id.
- let physical_region_id = RegionId::new(physical_table_id, region_id.region_number());
+ if self.template.engine == METRIC_ENGINE {
+ self.metric_engine_hook(create_expr, region_id, &mut request)
+ .await?;
+ }
+
+ Ok(request)
+ }
+ async fn metric_engine_hook(
+ &mut self,
+ create_expr: &CreateTableExpr,
+ region_id: RegionId,
+ request: &mut PbCreateRegionRequest,
+ ) -> Result<()> {
+ if let Some(physical_table_name) = request.options.get(LOGICAL_TABLE_METADATA_KEY) {
+ let table_id = if let Some(table_id) = self.physical_table_id {
+ table_id
+ } else {
+ let table_name_manager = self.context.table_metadata_manager.table_name_manager();
+ let table_name_key = TableNameKey::new(
+ &create_expr.catalog_name,
+ &create_expr.schema_name,
+ physical_table_name,
+ );
+ let table_id = table_name_manager
+ .get(table_name_key)
+ .await?
+ .context(TableInfoNotFoundSnafu {
+ table_name: physical_table_name,
+ })?
+ .table_id();
+ self.physical_table_id = Some(table_id);
+ table_id
+ };
+ // Concat physical table's table id and corresponding region number to get
+ // the physical region id.
+ let physical_region_id = RegionId::new(table_id, region_id.region_number());
request.options.insert(
LOGICAL_TABLE_METADATA_KEY.to_string(),
physical_region_id.as_u64().to_string(),
);
}
- Ok(request)
+ Ok(())
}
}
diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs
index 94c6cdf0a06a..6076e6125294 100644
--- a/src/common/meta/src/ddl/drop_table.rs
+++ b/src/common/meta/src/ddl/drop_table.rs
@@ -307,7 +307,7 @@ impl DropTableData {
}
fn region_routes(&self) -> &Vec<RegionRoute> {
- self.table_route_value.region_routes()
+ &self.table_route_value.region_routes
}
fn table_info(&self) -> &RawTableInfo {
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 6b1e4bf94f38..b1821047370e 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -177,7 +177,7 @@ impl DdlManager {
&self,
cluster_id: u64,
create_table_task: CreateTableTask,
- table_route: TableRouteValue,
+ region_routes: Vec<RegionRoute>,
region_wal_options: HashMap<RegionNumber, String>,
) -> Result<ProcedureId> {
let context = self.create_context();
@@ -185,7 +185,7 @@ impl DdlManager {
let procedure = CreateTableProcedure::new(
cluster_id,
create_table_task,
- table_route,
+ region_routes,
region_wal_options,
context,
);
@@ -275,10 +275,11 @@ async fn handle_truncate_table_task(
table_name: table_ref.to_string(),
})?;
- let table_route_value =
- table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
+ let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })?;
- let table_route = table_route_value.into_inner().region_routes().clone();
+ let table_route = table_route_value.into_inner().region_routes;
let id = ddl_manager
.submit_truncate_table_task(
@@ -355,8 +356,9 @@ async fn handle_drop_table_task(
table_name: table_ref.to_string(),
})?;
- let table_route_value =
- table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
+ let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })?;
let id = ddl_manager
.submit_drop_table_task(
@@ -390,7 +392,7 @@ async fn handle_create_table_task(
let TableMetadata {
table_id,
- table_route,
+ region_routes,
region_wal_options,
} = table_meta;
@@ -400,7 +402,7 @@ async fn handle_create_table_task(
.submit_create_table_task(
cluster_id,
create_table_task,
- table_route,
+ region_routes,
region_wal_options,
)
.await?;
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index c120c8ba939d..519d8ec7a1af 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -135,9 +135,9 @@ pub enum Error {
source: table::error::Error,
},
- #[snafu(display("Failed to find table route for table id {}", table_id))]
+ #[snafu(display("Table route not found: {}", table_name))]
TableRouteNotFound {
- table_id: TableId,
+ table_name: String,
location: Location,
},
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index bb2b87a973f5..d86880d9b339 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -147,14 +147,6 @@ pub trait TableMetaKey {
fn as_raw_key(&self) -> Vec<u8>;
}
-pub trait TableMetaValue {
- fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
- where
- Self: Sized;
-
- fn try_as_raw_value(&self) -> Result<Vec<u8>>;
-}
-
pub type TableMetadataManagerRef = Arc<TableMetadataManager>;
pub struct TableMetadataManager {
@@ -229,9 +221,7 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
}
}
-impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
- for DeserializedValueWithBytes<T>
-{
+impl<'de, T: DeserializeOwned + Serialize> Deserialize<'de> for DeserializedValueWithBytes<T> {
/// - Deserialize behaviors:
///
/// The `inner` field will be deserialized from the `bytes` field.
@@ -258,11 +248,11 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
}
}
-impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
+impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
/// Returns a struct containing a deserialized value and an original `bytes`.
/// It accepts original bytes of inner.
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
- let inner = T::try_from_raw_value(&bytes)?;
+ let inner = serde_json::from_slice(&bytes).context(error::SerdeJsonSnafu)?;
Ok(Self { bytes, inner })
}
@@ -383,10 +373,13 @@ impl TableMetadataManager {
pub async fn create_table_metadata(
&self,
mut table_info: RawTableInfo,
- table_route_value: TableRouteValue,
+ region_routes: Vec<RegionRoute>,
region_wal_options: HashMap<RegionNumber, String>,
) -> Result<()> {
- let region_numbers = table_route_value.region_numbers();
+ let region_numbers = region_routes
+ .iter()
+ .map(|region| region.region.id.region_number())
+ .collect::<Vec<_>>();
table_info.meta.region_numbers = region_numbers;
let table_id = table_info.ident.table_id;
let engine = table_info.meta.engine.clone();
@@ -410,28 +403,30 @@ impl TableMetadataManager {
.table_info_manager()
.build_create_txn(table_id, &table_info_value)?;
+ // Creates datanode table key value pairs.
+        let distribution = region_distribution(&region_routes)?;
+ let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
+ table_id,
+ &engine,
+ ®ion_storage_path,
+ region_options,
+ region_wal_options,
+ distribution,
+ )?;
+
+ // Creates table route.
+ let table_route_value = TableRouteValue::new(region_routes);
let (create_table_route_txn, on_create_table_route_failure) = self
.table_route_manager()
.build_create_txn(table_id, &table_route_value)?;
- let mut txn = Txn::merge_all(vec![
+ let txn = Txn::merge_all(vec![
create_table_name_txn,
create_table_info_txn,
+ create_datanode_table_txn,
create_table_route_txn,
]);
- if let TableRouteValue::Physical(x) = &table_route_value {
- let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
- table_id,
- &engine,
- ®ion_storage_path,
- region_options,
- region_wal_options,
- region_distribution(&x.region_routes)?,
- )?;
- txn = txn.merge(create_datanode_table_txn);
- }
-
let r = self.kv_backend.txn(txn).await?;
// Checks whether metadata was already created.
@@ -483,7 +478,7 @@ impl TableMetadataManager {
.build_delete_txn(table_id, table_info_value)?;
// Deletes datanode table key value pairs.
- let distribution = region_distribution(table_route_value.region_routes())?;
+ let distribution = region_distribution(&table_route_value.region_routes)?;
let delete_datanode_txn = self
.datanode_table_manager()
.build_delete_txn(table_id, distribution)?;
@@ -608,7 +603,7 @@ impl TableMetadataManager {
) -> Result<()> {
// Updates the datanode table key value pairs.
let current_region_distribution =
- region_distribution(current_table_route_value.region_routes())?;
+            region_distribution(&current_table_route_value.region_routes)?;
let new_region_distribution = region_distribution(&new_region_routes)?;
let update_datanode_table_txn = self.datanode_table_manager().build_update_txn(
@@ -656,7 +651,7 @@ impl TableMetadataManager {
where
F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
{
- let mut new_region_routes = current_table_route_value.region_routes().clone();
+ let mut new_region_routes = current_table_route_value.region_routes.clone();
let mut updated = 0;
for route in &mut new_region_routes {
@@ -716,12 +711,12 @@ impl_table_meta_key!(TableNameKey<'_>, TableInfoKey, DatanodeTableKey);
macro_rules! impl_table_meta_value {
($($val_ty: ty), *) => {
$(
- impl $crate::key::TableMetaValue for $val_ty {
- fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
+ impl $val_ty {
+ pub fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
}
- fn try_as_raw_value(&self) -> Result<Vec<u8>> {
+ pub fn try_as_raw_value(&self) -> Result<Vec<u8>> {
serde_json::to_vec(self).context(SerdeJsonSnafu)
}
}
@@ -749,7 +744,8 @@ macro_rules! impl_optional_meta_value {
impl_table_meta_value! {
TableNameValue,
TableInfoValue,
- DatanodeTableValue
+ DatanodeTableValue,
+ TableRouteValue
}
impl_optional_meta_value! {
@@ -769,7 +765,6 @@ mod tests {
use super::datanode_table::DatanodeTableKey;
use super::test_utils;
use crate::ddl::utils::region_storage_path;
- use crate::error::Result;
use crate::key::datanode_table::RegionInfo;
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
@@ -785,14 +780,14 @@ mod tests {
let region_routes = vec![region_route.clone()];
let expected_region_routes =
- TableRouteValue::physical(vec![region_route.clone(), region_route.clone()]);
+ TableRouteValue::new(vec![region_route.clone(), region_route.clone()]);
let expected = serde_json::to_vec(&expected_region_routes).unwrap();
// Serialize behaviors:
// The inner field will be ignored.
let value = DeserializedValueWithBytes {
// ignored
- inner: TableRouteValue::physical(region_routes.clone()),
+ inner: TableRouteValue::new(region_routes.clone()),
bytes: Bytes::from(expected.clone()),
};
@@ -836,56 +831,43 @@ mod tests {
test_utils::new_test_table_info(10, region_numbers)
}
- async fn create_physical_table_metadata(
- table_metadata_manager: &TableMetadataManager,
- table_info: RawTableInfo,
- region_routes: Vec<RegionRoute>,
- ) -> Result<()> {
- table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
- .await
- }
-
#[tokio::test]
async fn test_create_table_metadata() {
let mem_kv = Arc::new(MemoryKvBackend::default());
let table_metadata_manager = TableMetadataManager::new(mem_kv);
let region_route = new_test_region_route();
- let region_routes = &vec![region_route.clone()];
+ let region_routes = vec![region_route.clone()];
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
// creates metadata.
- create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .unwrap();
-
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
// if metadata was already created, it should be ok.
- assert!(create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .is_ok());
-
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
let mut modified_region_routes = region_routes.clone();
modified_region_routes.push(region_route.clone());
    // if remote metadata already exists, it should return an error.
- assert!(create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- modified_region_routes
- )
- .await
- .is_err());
+ assert!(table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ modified_region_routes,
+ HashMap::default()
+ )
+ .await
+ .is_err());
let (remote_table_info, remote_table_route) = table_metadata_manager
.get_full_table_info(10)
@@ -897,7 +879,7 @@ mod tests {
table_info
);
assert_eq!(
- remote_table_route.unwrap().into_inner().region_routes(),
+ remote_table_route.unwrap().into_inner().region_routes,
region_routes
);
}
@@ -907,23 +889,23 @@ mod tests {
let mem_kv = Arc::new(MemoryKvBackend::default());
let table_metadata_manager = TableMetadataManager::new(mem_kv);
let region_route = new_test_region_route();
- let region_routes = &vec![region_route.clone()];
+ let region_routes = vec![region_route.clone()];
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let datanode_id = 2;
- let table_route_value = DeserializedValueWithBytes::from_inner(TableRouteValue::physical(
- region_routes.clone(),
- ));
+ let table_route_value =
+ DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
- create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .unwrap();
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
@@ -978,7 +960,7 @@ mod tests {
.unwrap()
.unwrap()
.into_inner();
- assert_eq!(removed_table_route.region_routes(), region_routes);
+ assert_eq!(removed_table_route.region_routes, region_routes);
}
#[tokio::test]
@@ -991,14 +973,14 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
// creates metadata.
- create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .unwrap();
-
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
let new_table_name = "another_name".to_string();
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
@@ -1063,14 +1045,14 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
// creates metadata.
- create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .unwrap();
-
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
let mut new_table_info = table_info.clone();
new_table_info.name = "hi".to_string();
let current_table_info_value =
@@ -1141,18 +1123,17 @@ mod tests {
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
- let current_table_route_value = DeserializedValueWithBytes::from_inner(
- TableRouteValue::physical(region_routes.clone()),
- );
-
+ let current_table_route_value =
+ DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
- create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .unwrap();
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
table_metadata_manager
.update_leader_region_status(table_id, ¤t_table_route_value, |region_route| {
@@ -1173,11 +1154,11 @@ mod tests {
.unwrap();
assert_eq!(
- updated_route_value.region_routes()[0].leader_status,
+ updated_route_value.region_routes[0].leader_status,
Some(RegionStatus::Downgraded)
);
assert_eq!(
- updated_route_value.region_routes()[1].leader_status,
+ updated_route_value.region_routes[1].leader_status,
Some(RegionStatus::Downgraded)
);
}
@@ -1212,19 +1193,17 @@ mod tests {
let engine = table_info.meta.engine.as_str();
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
- let current_table_route_value = DeserializedValueWithBytes::from_inner(
- TableRouteValue::physical(region_routes.clone()),
- );
-
+ let current_table_route_value =
+ DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
// creates metadata.
- create_physical_table_metadata(
- &table_metadata_manager,
- table_info.clone(),
- region_routes.clone(),
- )
- .await
- .unwrap();
-
+ table_metadata_manager
+ .create_table_metadata(
+ table_info.clone(),
+ region_routes.clone(),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
assert_datanode_table(&table_metadata_manager, table_id, ®ion_routes).await;
let new_region_routes = vec![
new_region_route(1, 1),
diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs
index b2e25e014bc8..3ddb00a19ac2 100644
--- a/src/common/meta/src/key/datanode_table.rs
+++ b/src/common/meta/src/key/datanode_table.rs
@@ -24,8 +24,7 @@ use table::metadata::TableId;
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::key::{
- RegionDistribution, TableMetaKey, TableMetaValue, DATANODE_TABLE_KEY_PATTERN,
- DATANODE_TABLE_KEY_PREFIX,
+ RegionDistribution, TableMetaKey, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs
index 5415a0f1f941..21f8656451b7 100644
--- a/src/common/meta/src/key/table_info.rs
+++ b/src/common/meta/src/key/table_info.rs
@@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize};
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
-use super::{DeserializedValueWithBytes, TableMetaValue, TABLE_INFO_KEY_PREFIX};
+use super::{DeserializedValueWithBytes, TABLE_INFO_KEY_PREFIX};
use crate::error::Result;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
diff --git a/src/common/meta/src/key/table_name.rs b/src/common/meta/src/key/table_name.rs
index 12d44dace180..cf3690e3ff8d 100644
--- a/src/common/meta/src/key/table_name.rs
+++ b/src/common/meta/src/key/table_name.rs
@@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
-use super::{TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
+use super::{TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::memory::MemoryKvBackend;
diff --git a/src/common/meta/src/key/table_region.rs b/src/common/meta/src/key/table_region.rs
index e51e1a547194..7dabc8f114ef 100644
--- a/src/common/meta/src/key/table_region.rs
+++ b/src/common/meta/src/key/table_region.rs
@@ -71,8 +71,8 @@ impl_table_meta_value! {TableRegionValue}
#[cfg(test)]
mod tests {
+
use super::*;
- use crate::key::TableMetaValue;
#[test]
fn test_serde() {
diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs
index f799f321e544..231c71ccba92 100644
--- a/src/common/meta/src/key/table_route.rs
+++ b/src/common/meta/src/key/table_route.rs
@@ -16,12 +16,11 @@ use std::collections::HashMap;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-use store_api::storage::{RegionId, RegionNumber};
+use store_api::storage::RegionId;
use table::metadata::TableId;
-use super::{DeserializedValueWithBytes, TableMetaValue};
-use crate::error::{Result, SerdeJsonSnafu};
+use super::DeserializedValueWithBytes;
+use crate::error::Result;
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
@@ -39,121 +38,42 @@ impl TableRouteKey {
}
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
-#[serde(tag = "type", rename_all = "snake_case")]
-pub enum TableRouteValue {
- Physical(PhysicalTableRouteValue),
- Logical(LogicalTableRouteValue),
-}
-
-#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
-pub struct PhysicalTableRouteValue {
+pub struct TableRouteValue {
pub region_routes: Vec<RegionRoute>,
version: u64,
}
-#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
-pub struct LogicalTableRouteValue {
- // TODO(LFC): Add table route for MetricsEngine table.
-}
-
impl TableRouteValue {
- pub fn physical(region_routes: Vec<RegionRoute>) -> Self {
- Self::Physical(PhysicalTableRouteValue::new(region_routes))
+ pub fn new(region_routes: Vec<RegionRoute>) -> Self {
+ Self {
+ region_routes,
+ version: 0,
+ }
}
/// Returns a new version [TableRouteValue] with `region_routes`.
pub fn update(&self, region_routes: Vec<RegionRoute>) -> Self {
- let version = self.physical_table_route().version;
- Self::Physical(PhysicalTableRouteValue {
+ Self {
region_routes,
- version: version + 1,
- })
+ version: self.version + 1,
+ }
}
/// Returns the version.
///
/// For test purpose.
- #[cfg(any(test, feature = "testing"))]
+    #[cfg(any(test, feature = "testing"))]
pub fn version(&self) -> u64 {
- self.physical_table_route().version
+ self.version
}
/// Returns the corresponding [RegionRoute].
pub fn region_route(&self, region_id: RegionId) -> Option<RegionRoute> {
- self.physical_table_route()
- .region_routes
+ self.region_routes
.iter()
.find(|route| route.region.id == region_id)
.cloned()
}
-
- /// Gets the [RegionRoute]s of this [TableRouteValue::Physical].
- ///
- /// # Panics
- /// The route type is not the [TableRouteValue::Physical].
- pub fn region_routes(&self) -> &Vec<RegionRoute> {
- &self.physical_table_route().region_routes
- }
-
- fn physical_table_route(&self) -> &PhysicalTableRouteValue {
- match self {
- TableRouteValue::Physical(x) => x,
- _ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"),
- }
- }
-
- pub fn region_numbers(&self) -> Vec<RegionNumber> {
- match self {
- TableRouteValue::Physical(x) => x
- .region_routes
- .iter()
- .map(|region_route| region_route.region.id.region_number())
- .collect::<Vec<_>>(),
- TableRouteValue::Logical(x) => x
- .region_ids()
- .iter()
- .map(|region_id| region_id.region_number())
- .collect::<Vec<_>>(),
- }
- }
-}
-
-impl TableMetaValue for TableRouteValue {
- fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
- let r = serde_json::from_slice::<TableRouteValue>(raw_value);
- match r {
- // Compatible with old TableRouteValue.
- Err(e) if e.is_data() => Ok(Self::Physical(
- serde_json::from_slice::<PhysicalTableRouteValue>(raw_value)
- .context(SerdeJsonSnafu)?,
- )),
- Ok(x) => Ok(x),
- Err(e) => Err(e).context(SerdeJsonSnafu),
- }
- }
-
- fn try_as_raw_value(&self) -> Result<Vec<u8>> {
- serde_json::to_vec(self).context(SerdeJsonSnafu)
- }
-}
-
-impl PhysicalTableRouteValue {
- pub fn new(region_routes: Vec<RegionRoute>) -> Self {
- Self {
- region_routes,
- version: 0,
- }
- }
-}
-
-impl LogicalTableRouteValue {
- pub fn physical_table_id(&self) -> TableId {
- todo!()
- }
-
- pub fn region_ids(&self) -> Vec<RegionId> {
- todo!()
- }
}
impl TableMetaKey for TableRouteKey {
@@ -349,24 +269,7 @@ impl TableRouteManager {
) -> Result<Option<RegionDistribution>> {
self.get(table_id)
.await?
- .map(|table_route| region_distribution(table_route.region_routes()))
+ .map(|table_route| region_distribution(&table_route.into_inner().region_routes))
.transpose()
}
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_table_route_compatibility() {
- let old_raw_v = r#"{"region_routes":[{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]},{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]}],"version":0}"#;
- let v = TableRouteValue::try_from_raw_value(old_raw_v.as_bytes()).unwrap();
-
- let new_raw_v = format!("{:?}", v);
- assert_eq!(
- new_raw_v,
- r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }], version: 0 })"#
- );
- }
-}
diff --git a/src/frontend/src/instance/standalone.rs b/src/frontend/src/instance/standalone.rs
index 21496e28edc5..d46ee3d45886 100644
--- a/src/frontend/src/instance/standalone.rs
+++ b/src/frontend/src/instance/standalone.rs
@@ -18,14 +18,10 @@ use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use async_trait::async_trait;
use client::region::check_response_header;
-use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_meta::datanode_manager::{AffectedRows, Datanode, DatanodeManager, DatanodeRef};
use common_meta::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
use common_meta::error::{self as meta_error, Result as MetaResult, UnsupportedSnafu};
-use common_meta::key::table_route::{
- LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
-};
use common_meta::peer::Peer;
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{Region, RegionRoute};
@@ -38,7 +34,7 @@ use common_telemetry::{debug, info, tracing};
use datanode::region_server::RegionServer;
use servers::grpc::region_server::RegionServerHandler;
use snafu::{ensure, OptionExt, ResultExt};
-use store_api::storage::{RegionId, RegionNumber, TableId};
+use store_api::storage::{RegionId, TableId};
use crate::error::{InvalidRegionRequestSnafu, InvokeRegionServerSnafu, Result};
@@ -155,29 +151,17 @@ impl StandaloneTableMetadataAllocator {
};
Ok(table_id)
}
+}
- fn create_wal_options(
+#[async_trait]
+impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
+ async fn create(
&self,
- table_route: &TableRouteValue,
- ) -> MetaResult<HashMap<RegionNumber, String>> {
- match table_route {
- TableRouteValue::Physical(x) => {
- let region_numbers = x
- .region_routes
- .iter()
- .map(|route| route.region.id.region_number())
- .collect();
- allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
- }
- TableRouteValue::Logical(_) => Ok(HashMap::new()),
- }
- }
-}
+ _ctx: &TableMetadataAllocatorContext,
+ task: &CreateTableTask,
+ ) -> MetaResult<TableMetadata> {
+ let table_id = self.allocate_table_id(task).await?;
-fn create_table_route(table_id: TableId, task: &CreateTableTask) -> TableRouteValue {
- if task.create_table.engine == METRIC_ENGINE {
- TableRouteValue::Logical(LogicalTableRouteValue {})
- } else {
let region_routes = task
.partitions
.iter()
@@ -198,22 +182,13 @@ fn create_table_route(table_id: TableId, task: &CreateTableTask) -> TableRouteVa
}
})
.collect::<Vec<_>>();
- TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
- }
-}
-
-#[async_trait]
-impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
- async fn create(
- &self,
- _ctx: &TableMetadataAllocatorContext,
- task: &CreateTableTask,
- ) -> MetaResult<TableMetadata> {
- let table_id = self.allocate_table_id(task).await?;
-
- let table_route = create_table_route(table_id, task);
- let region_wal_options = self.create_wal_options(&table_route)?;
+ let region_numbers = region_routes
+ .iter()
+ .map(|route| route.region.id.region_number())
+ .collect();
+ let region_wal_options =
+ allocate_region_wal_options(region_numbers, &self.wal_options_allocator)?;
debug!(
"Allocated region wal options {:?} for table {}",
@@ -222,8 +197,8 @@ impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
Ok(TableMetadata {
table_id,
- table_route,
- region_wal_options,
+ region_routes,
+ region_wal_options: HashMap::default(),
})
}
}
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index eb792cf9ecd2..d3433179fea0 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -104,7 +104,6 @@ mod test {
use std::sync::Arc;
use common_meta::distributed_time_constants;
- use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -162,11 +161,7 @@ mod test {
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
@@ -308,11 +303,7 @@ mod test {
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
index c2d06590aec2..84466eb19928 100644
--- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
+++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
@@ -207,7 +207,7 @@ mod tests {
.unwrap();
let should_downgraded = table_route_value
- .region_routes()
+ .region_routes
.iter()
.find(|route| route.region.id.region_number() == failed_region.region_number)
.unwrap();
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index 23ade1a2a1fe..505f1cb55a51 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -85,7 +85,7 @@ impl UpdateRegionMetadata {
.context(error::TableMetadataManagerSnafu)?
.context(TableRouteNotFoundSnafu { table_id })?;
- let mut new_region_routes = table_route_value.region_routes().clone();
+ let mut new_region_routes = table_route_value.region_routes.clone();
for region_route in new_region_routes.iter_mut() {
if region_route.region.id.region_number() == failed_region.region_number {
@@ -233,8 +233,7 @@ mod tests {
.unwrap()
.unwrap()
.into_inner()
- .region_routes()
- .clone()
+ .region_routes
}
// Original region routes:
@@ -396,8 +395,8 @@ mod tests {
.unwrap()
.into_inner();
- let peers = &extract_all_peers(table_route_value.region_routes());
- let actual = table_route_value.region_routes();
+ let peers = &extract_all_peers(&table_route_value.region_routes);
+ let actual = &table_route_value.region_routes;
let expected = &vec![
new_region_route(1, peers, 2),
new_region_route(2, peers, 3),
@@ -416,7 +415,7 @@ mod tests {
.unwrap()
.into_inner();
- let map = region_distribution(table_route_value.region_routes()).unwrap();
+ let map = region_distribution(&table_route_value.region_routes).unwrap();
assert_eq!(map.len(), 2);
assert_eq!(map.get(&2), Some(&vec![1, 3]));
assert_eq!(map.get(&3), Some(&vec![2, 4]));
diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs
index fa84a1a6dd5e..3ef5d46c6595 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_start.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs
@@ -84,7 +84,7 @@ impl RegionMigrationStart {
let table_route = ctx.get_table_route_value().await?;
let region_route = table_route
- .region_routes()
+ .region_routes
.iter()
.find(|route| route.region.id == region_id)
.cloned()
@@ -137,6 +137,7 @@ impl RegionMigrationStart {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -186,8 +187,10 @@ mod tests {
..Default::default()
};
- env.create_physical_table_metadata(table_info, vec![region_route])
- .await;
+ env.table_metadata_manager()
+ .create_table_metadata(table_info, vec![region_route], HashMap::default())
+ .await
+ .unwrap();
let err = state
.retrieve_region_route(&mut ctx, RegionId::new(1024, 3))
@@ -218,8 +221,10 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ env.table_metadata_manager()
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -249,8 +254,10 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ env.table_metadata_manager()
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -274,8 +281,10 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ env.table_metadata_manager()
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
index 74b904ce0105..dc6ebb2f4df9 100644
--- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
@@ -187,7 +187,6 @@ mod tests {
use std::assert_matches::assert_matches;
use common_catalog::consts::MITO2_ENGINE;
- use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
@@ -410,11 +409,7 @@ mod tests {
}];
env.table_metadata_manager()
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index b34b1e655f4c..1c95a2d393a7 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -22,7 +22,6 @@ use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
use common_meta::instruction::{
DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,
};
-use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
@@ -145,22 +144,6 @@ impl TestingEnv {
provider: Arc::new(MockContextProvider::default()),
}
}
-
- // Creates a table metadata with the physical table route.
- pub async fn create_physical_table_metadata(
- &self,
- table_info: RawTableInfo,
- region_routes: Vec<RegionRoute>,
- ) {
- self.table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
- .await
- .unwrap();
- }
}
/// Generates a [InstructionReply::OpenRegion] reply.
@@ -386,11 +369,7 @@ impl ProcedureMigrationTestSuite {
) {
self.env
.table_metadata_manager()
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
}
@@ -398,7 +377,7 @@ impl ProcedureMigrationTestSuite {
/// Verifies table metadata after region migration.
pub(crate) async fn verify_table_metadata(&self) {
let region_id = self.context.persistent_ctx.region_id;
- let table_route = self
+ let region_routes = self
.env
.table_metadata_manager
.table_route_manager()
@@ -406,25 +385,22 @@ impl ProcedureMigrationTestSuite {
.await
.unwrap()
.unwrap()
- .into_inner();
- let region_routes = table_route.region_routes();
+ .into_inner()
+ .region_routes;
let expected_leader_id = self.context.persistent_ctx.to_peer.id;
let removed_follower_id = self.context.persistent_ctx.from_peer.id;
let region_route = region_routes
- .iter()
+ .into_iter()
.find(|route| route.region.id == region_id)
.unwrap();
assert!(!region_route.is_leader_downgraded());
- assert_eq!(
- region_route.leader_peer.as_ref().unwrap().id,
- expected_leader_id
- );
+ assert_eq!(region_route.leader_peer.unwrap().id, expected_leader_id);
assert!(!region_route
.follower_peers
- .iter()
+ .into_iter()
.any(|route| route.id == removed_follower_id))
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
index 5a76d34819e7..7deaddb5c27b 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
@@ -74,6 +74,7 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -135,10 +136,12 @@ mod tests {
},
];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
+
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -187,10 +190,11 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -208,7 +212,7 @@ mod tests {
// It should remain unchanged.
assert_eq!(latest_table_route.version(), 0);
- assert!(!latest_table_route.region_routes()[0].is_leader_downgraded());
+ assert!(!latest_table_route.region_routes[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
@@ -229,10 +233,11 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -248,7 +253,7 @@ mod tests {
.unwrap()
.unwrap();
- assert!(latest_table_route.region_routes()[0].is_leader_downgraded());
+ assert!(latest_table_route.region_routes[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
index 7281737752a4..6c1a2648535a 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
@@ -59,6 +59,7 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -127,10 +128,12 @@ mod tests {
region_routes
};
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
+
let old_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -163,14 +166,15 @@ mod tests {
state.rollback_downgraded_region(&mut ctx).await.unwrap();
- let table_route = table_metadata_manager
+ let region_routes = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
- .into_inner();
- assert_eq!(&expected_region_routes, table_route.region_routes());
+ .into_inner()
+ .region_routes;
+ assert_eq!(expected_region_routes, region_routes);
}
#[tokio::test]
@@ -210,10 +214,11 @@ mod tests {
region_routes
};
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -224,13 +229,14 @@ mod tests {
assert!(ctx.volatile_ctx.table_route.is_none());
- let table_route = table_metadata_manager
+ let region_routes = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
- .into_inner();
- assert_eq!(&expected_region_routes, table_route.region_routes());
+ .into_inner()
+ .region_routes;
+ assert_eq!(expected_region_routes, region_routes);
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
index 597d9afe9a7b..4886df0e5af4 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
@@ -33,7 +33,7 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
- let mut region_routes = table_route_value.region_routes().clone();
+ let mut region_routes = table_route_value.region_routes.clone();
let region_route = region_routes
.iter_mut()
.find(|route| route.region.id == region_id)
@@ -81,7 +81,7 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
- let region_routes = table_route_value.region_routes().clone();
+ let region_routes = table_route_value.region_routes.clone();
let region_route = region_routes
.into_iter()
.find(|route| route.region.id == region_id)
@@ -176,6 +176,7 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -224,8 +225,11 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -250,8 +254,11 @@ mod tests {
..Default::default()
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -278,8 +285,11 @@ mod tests {
leader_status: Some(RegionStatus::Downgraded),
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let new_region_routes = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -316,10 +326,12 @@ mod tests {
},
];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
+
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -373,8 +385,11 @@ mod tests {
leader_status: None,
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(!updated);
@@ -396,8 +411,11 @@ mod tests {
leader_status: None,
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(updated);
@@ -419,8 +437,11 @@ mod tests {
leader_status: Some(RegionStatus::Downgraded),
}];
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let err = state.check_metadata_updated(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
@@ -449,23 +470,24 @@ mod tests {
.unwrap();
ctx.volatile_ctx.opening_region_guard = Some(guard);
- env.create_physical_table_metadata(table_info, region_routes)
- .await;
-
let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes, HashMap::default())
+ .await
+ .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
let _ = next.as_any().downcast_ref::<RegionMigrationEnd>().unwrap();
- let table_route = table_metadata_manager
+ let region_routes = table_metadata_manager
.table_route_manager()
.get(table_id)
.await
.unwrap()
.unwrap()
- .into_inner();
- let region_routes = table_route.region_routes();
+ .into_inner()
+ .region_routes;
assert!(ctx.volatile_ctx.table_route.is_none());
assert!(ctx.volatile_ctx.opening_region_guard.is_none());
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index 9ffad3aa6cf9..e7b8a681138c 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -100,12 +100,12 @@ fn test_region_request_builder() {
let procedure = CreateTableProcedure::new(
1,
create_table_task(),
- TableRouteValue::physical(test_data::new_region_routes()),
+ test_data::new_region_routes(),
HashMap::default(),
test_data::new_ddl_context(Arc::new(DatanodeClients::default())),
);
- let template = procedure.new_region_request_builder(None).unwrap();
+ let template = procedure.new_region_request_builder().unwrap();
let expected = PbCreateRegionRequest {
region_id: 0,
@@ -191,7 +191,7 @@ async fn test_on_datanode_create_regions() {
let mut procedure = CreateTableProcedure::new(
1,
create_table_task(),
- TableRouteValue::physical(region_routes),
+ region_routes,
HashMap::default(),
test_data::new_ddl_context(datanode_manager),
);
@@ -247,7 +247,7 @@ async fn test_on_datanode_drop_regions() {
let procedure = DropTableProcedure::new(
1,
drop_table_task,
- DeserializedValueWithBytes::from_inner(TableRouteValue::physical(region_routes)),
+ DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes)),
DeserializedValueWithBytes::from_inner(TableInfoValue::new(test_data::new_table_info())),
test_data::new_ddl_context(datanode_manager),
);
@@ -373,7 +373,7 @@ async fn test_submit_alter_region_requests() {
.table_metadata_manager
.create_table_metadata(
table_info.clone(),
- TableRouteValue::physical(region_routes),
+ region_routes.clone(),
HashMap::default(),
)
.await
diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs
index cbd2451896b1..b555d2e780dd 100644
--- a/src/meta-srv/src/region/lease_keeper.rs
+++ b/src/meta-srv/src/region/lease_keeper.rs
@@ -188,7 +188,6 @@ mod tests {
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
- use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -292,11 +291,7 @@ mod tests {
let keeper = new_test_keeper();
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(vec![region_route]),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, vec![region_route.clone()], HashMap::default())
.await
.unwrap();
@@ -383,11 +378,7 @@ mod tests {
let keeper = new_test_keeper();
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(vec![region_route]),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, vec![region_route.clone()], HashMap::default())
.await
.unwrap();
diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs
index a5f5beeacd35..114a48beff72 100644
--- a/src/meta-srv/src/selector/load_based.rs
+++ b/src/meta-srv/src/selector/load_based.rs
@@ -143,7 +143,7 @@ async fn get_leader_peer_ids(
.context(error::TableMetadataManagerSnafu)
.map(|route| {
route.map_or_else(Vec::new, |route| {
- find_leaders(route.region_routes())
+ find_leaders(&route.region_routes)
.into_iter()
.map(|peer| peer.id)
.collect()
diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs
index 21e5778209f7..12fac723b430 100644
--- a/src/meta-srv/src/table_meta_alloc.rs
+++ b/src/meta-srv/src/table_meta_alloc.rs
@@ -12,23 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
-
-use common_catalog::consts::METRIC_ENGINE;
+use common_catalog::format_full_table_name;
use common_error::ext::BoxedError;
use common_meta::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
-use common_meta::error::{ExternalSnafu, Result as MetaResult};
-use common_meta::key::table_route::{
- LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
-};
+use common_meta::error::{self as meta_error, Result as MetaResult};
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::sequence::SequenceRef;
use common_meta::wal::{allocate_region_wal_options, WalOptionsAllocatorRef};
-use common_meta::ClusterId;
-use common_telemetry::debug;
+use common_telemetry::{debug, warn};
use snafu::{ensure, ResultExt};
-use store_api::storage::{RegionId, RegionNumber, TableId, MAX_REGION_SEQ};
+use store_api::storage::{RegionId, TableId, MAX_REGION_SEQ};
use crate::error::{self, Result, TooManyPartitionsSnafu};
use crate::metasrv::{SelectorContext, SelectorRef};
@@ -55,83 +49,6 @@ impl MetaSrvTableMetadataAllocator {
wal_options_allocator,
}
}
-
- async fn create_table_route(
- &self,
- cluster_id: ClusterId,
- table_id: TableId,
- task: &CreateTableTask,
- ) -> Result<TableRouteValue> {
- let table_route = if task.create_table.engine == METRIC_ENGINE {
- TableRouteValue::Logical(LogicalTableRouteValue {})
- } else {
- let regions = task.partitions.len();
-
- ensure!(regions <= MAX_REGION_SEQ as usize, TooManyPartitionsSnafu);
-
- let mut peers = self
- .selector
- .select(
- cluster_id,
- &self.ctx,
- SelectorOptions {
- min_required_items: regions,
- allow_duplication: true,
- },
- )
- .await?;
-
- ensure!(
- peers.len() >= regions,
- error::NoEnoughAvailableDatanodeSnafu {
- required: regions,
- available: peers.len(),
- }
- );
-
- peers.truncate(regions);
-
- let region_routes = task
- .partitions
- .iter()
- .enumerate()
- .map(|(i, partition)| {
- let region = Region {
- id: RegionId::new(table_id, i as RegionNumber),
- partition: Some(partition.clone().into()),
- ..Default::default()
- };
-
- let peer = peers[i % peers.len()].clone();
-
- RegionRoute {
- region,
- leader_peer: Some(peer.into()),
- ..Default::default()
- }
- })
- .collect::<Vec<_>>();
- TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
- };
- Ok(table_route)
- }
-
- fn create_wal_options(
- &self,
- table_route: &TableRouteValue,
- ) -> MetaResult<HashMap<RegionNumber, String>> {
- match table_route {
- TableRouteValue::Physical(x) => {
- let region_numbers = x
- .region_routes
- .iter()
- .map(|route| route.region.id.region_number())
- .collect();
- allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
- }
- TableRouteValue::Logical(_) => Ok(HashMap::new()),
- }
- }
}
#[async_trait::async_trait]
@@ -141,15 +58,23 @@ impl TableMetadataAllocator for MetaSrvTableMetadataAllocator {
ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> MetaResult<TableMetadata> {
- let table_id = self.table_id_sequence.next().await? as TableId;
-
- let table_route = self
- .create_table_route(ctx.cluster_id, table_id, task)
- .await
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
-
- let region_wal_options = self.create_wal_options(&table_route)?;
+ let (table_id, region_routes) = handle_create_region_routes(
+ ctx.cluster_id,
+ task,
+ &self.ctx,
+ &self.selector,
+ &self.table_id_sequence,
+ )
+ .await
+ .map_err(BoxedError::new)
+ .context(meta_error::ExternalSnafu)?;
+
+ let region_numbers = region_routes
+ .iter()
+ .map(|route| route.region.id.region_number())
+ .collect();
+ let region_wal_options =
+ allocate_region_wal_options(region_numbers, &self.wal_options_allocator)?;
debug!(
"Allocated region wal options {:?} for table {}",
@@ -158,8 +83,84 @@ impl TableMetadataAllocator for MetaSrvTableMetadataAllocator {
Ok(TableMetadata {
table_id,
- table_route,
+ region_routes,
region_wal_options,
})
}
}
+
+/// Pre-allocates the table id and region routes for the table being created.
+async fn handle_create_region_routes(
+ cluster_id: u64,
+ task: &CreateTableTask,
+ ctx: &SelectorContext,
+ selector: &SelectorRef,
+ table_id_sequence: &SequenceRef,
+) -> Result<(TableId, Vec<RegionRoute>)> {
+ let table_info = &task.table_info;
+ let partitions = &task.partitions;
+
+ let mut peers = selector
+ .select(
+ cluster_id,
+ ctx,
+ SelectorOptions {
+ min_required_items: partitions.len(),
+ allow_duplication: true,
+ },
+ )
+ .await?;
+
+ if peers.len() < partitions.len() {
+ warn!(
+ "Create table failed due to no enough available datanodes, table: {}, partition number: {}, datanode number: {}",
+ format_full_table_name(
+ &table_info.catalog_name,
+ &table_info.schema_name,
+ &table_info.name
+ ),
+ partitions.len(),
+ peers.len()
+ );
+ return error::NoEnoughAvailableDatanodeSnafu {
+ required: partitions.len(),
+ available: peers.len(),
+ }
+ .fail();
+ }
+
+    // We don't need to keep all peers, just truncate them to the number of partitions.
+    // If there are not enough peers, some peers will be used for multiple partitions.
+ peers.truncate(partitions.len());
+
+ let table_id = table_id_sequence
+ .next()
+ .await
+ .context(error::NextSequenceSnafu)? as u32;
+
+ ensure!(
+ partitions.len() <= MAX_REGION_SEQ as usize,
+ TooManyPartitionsSnafu
+ );
+
+ let region_routes = partitions
+ .iter()
+ .enumerate()
+ .map(|(i, partition)| {
+ let region = Region {
+ id: RegionId::new(table_id, i as u32),
+ partition: Some(partition.clone().into()),
+ ..Default::default()
+ };
+ let peer = peers[i % peers.len()].clone();
+ RegionRoute {
+ region,
+ leader_peer: Some(peer.into()),
+ follower_peers: vec![], // follower_peers is not supported at the moment
+ leader_status: None,
+ }
+ })
+ .collect::<Vec<_>>();
+
+ Ok((table_id, region_routes))
+}
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index 3013ac9ad745..801b63ab3222 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -17,7 +17,6 @@ use std::sync::Arc;
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
-use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
@@ -146,11 +145,7 @@ pub(crate) async fn prepare_table_region_and_info_value(
region_route_factory(4, 3),
];
table_metadata_manager
- .create_table_metadata(
- table_info,
- TableRouteValue::physical(region_routes),
- HashMap::default(),
- )
+ .create_table_metadata(table_info, region_routes, HashMap::default())
.await
.unwrap();
}
diff --git a/src/operator/src/tests/partition_manager.rs b/src/operator/src/tests/partition_manager.rs
index dd2a044b51c3..c0d2a9f74f6b 100644
--- a/src/operator/src/tests/partition_manager.rs
+++ b/src/operator/src/tests/partition_manager.rs
@@ -17,7 +17,6 @@ use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use catalog::kvbackend::MetaKvBackend;
-use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackendRef;
@@ -115,7 +114,7 @@ pub(crate) async fn create_partition_rule_manager(
table_metadata_manager
.create_table_metadata(
new_test_table_info(1, "table_1", regions.clone().into_iter()).into(),
- TableRouteValue::physical(vec![
+ vec![
RegionRoute {
region: Region {
id: 3.into(),
@@ -170,7 +169,7 @@ pub(crate) async fn create_partition_rule_manager(
follower_peers: vec![],
leader_status: None,
},
- ]),
+ ],
region_wal_options.clone(),
)
.await
@@ -179,7 +178,7 @@ pub(crate) async fn create_partition_rule_manager(
table_metadata_manager
.create_table_metadata(
new_test_table_info(2, "table_2", regions.clone().into_iter()).into(),
- TableRouteValue::physical(vec![
+ vec![
RegionRoute {
region: Region {
id: 1.into(),
@@ -240,7 +239,7 @@ pub(crate) async fn create_partition_rule_manager(
follower_peers: vec![],
leader_status: None,
},
- ]),
+ ],
region_wal_options,
)
.await
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index ad15c62cc1dd..41b3bef065f8 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -19,7 +19,7 @@ use api::v1::Rows;
use common_meta::key::table_route::TableRouteManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
-use common_meta::rpc::router::RegionRoutes;
+use common_meta::rpc::router::{convert_to_region_leader_map, RegionRoutes};
use common_query::prelude::Expr;
use datafusion_expr::{BinaryExpr, Expr as DfExpr, Operator};
use datatypes::prelude::Value;
@@ -76,10 +76,39 @@ impl PartitionRuleManager {
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
- Ok(RegionRoutes(route.region_routes().clone()))
+ Ok(RegionRoutes(route.region_routes))
}
- pub async fn find_table_partitions(&self, table_id: TableId) -> Result<Vec<PartitionInfo>> {
+ /// Find datanodes of corresponding regions of given table.
+ pub async fn find_region_datanodes(
+ &self,
+ table_id: TableId,
+ regions: Vec<RegionNumber>,
+ ) -> Result<HashMap<Peer, Vec<RegionNumber>>> {
+ let route = self
+ .table_route_manager
+ .get(table_id)
+ .await
+ .context(error::TableRouteManagerSnafu)?
+ .context(error::FindTableRoutesSnafu { table_id })?
+ .into_inner();
+ let mut datanodes = HashMap::with_capacity(regions.len());
+ let region_map = convert_to_region_leader_map(&route.region_routes);
+ for region in regions.iter() {
+ let datanode = *region_map.get(region).context(error::FindDatanodeSnafu {
+ table_id,
+ region: *region,
+ })?;
+ datanodes
+ .entry(datanode.clone())
+ .or_insert_with(Vec::new)
+ .push(*region);
+ }
+ Ok(datanodes)
+ }
+
+ /// Find all leader peers of given table.
+ pub async fn find_table_region_leaders(&self, table_id: TableId) -> Result<Vec<Peer>> {
let route = self
.table_route_manager
.get(table_id)
@@ -87,15 +116,33 @@ impl PartitionRuleManager {
.context(error::TableRouteManagerSnafu)?
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
- let region_routes = route.region_routes();
+ let mut peers = Vec::with_capacity(route.region_routes.len());
+
+ for peer in &route.region_routes {
+ peers.push(peer.leader_peer.clone().with_context(|| FindLeaderSnafu {
+ region_id: peer.region.id,
+ table_id,
+ })?);
+ }
+ Ok(peers)
+ }
+
+ pub async fn find_table_partitions(&self, table_id: TableId) -> Result<Vec<PartitionInfo>> {
+ let route = self
+ .table_route_manager
+ .get(table_id)
+ .await
+ .context(error::TableRouteManagerSnafu)?
+ .context(error::FindTableRoutesSnafu { table_id })?
+ .into_inner();
ensure!(
- !region_routes.is_empty(),
+ !route.region_routes.is_empty(),
error::FindTableRoutesSnafu { table_id }
);
- let mut partitions = Vec::with_capacity(region_routes.len());
- for r in region_routes {
+ let mut partitions = Vec::with_capacity(route.region_routes.len());
+ for r in route.region_routes.iter() {
let partition = r
.region
.partition
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index e9731cc336fa..e997139b5357 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -521,7 +521,7 @@ CREATE TABLE {table_name} (
.unwrap()
.into_inner();
- let region_to_dn_map = region_distribution(table_route_value.region_routes())
+ let region_to_dn_map = region_distribution(&table_route_value.region_routes)
.unwrap()
.iter()
.map(|(k, v)| (v[0], *k))
diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs
index 05253dc0a236..ac5a2e4b3ca9 100644
--- a/tests-integration/src/instance.rs
+++ b/tests-integration/src/instance.rs
@@ -216,7 +216,7 @@ mod tests {
.unwrap()
.into_inner();
- let region_to_dn_map = region_distribution(table_route_value.region_routes())
+ let region_to_dn_map = region_distribution(&table_route_value.region_routes)
.unwrap()
.iter()
.map(|(k, v)| (v[0], *k))
|
fix
|
revert unfinished route table change (#3008)
|
5daac5fe3deea595f41f641d47bfc27e96fae9ed
|
2025-01-20 09:22:38
|
Ning Sun
|
ci: revert coverage runner (#5403)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 2ab040bffe2d..c869e8a24181 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -679,7 +679,7 @@ jobs:
coverage:
if: github.event_name == 'merge_group'
- runs-on: ubuntu-24.04-arm
+ runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
|
ci
|
revert coverage runner (#5403)
|
9459ace33eb3791a2e9611d08d769e4c8308f2e8
|
2024-04-10 23:17:54
|
tison
|
ci: add CODEOWNERS file (#3691)
| false
|
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000000..c167b3649c6a
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,27 @@
+# GreptimeDB CODEOWNERS
+
+# These owners will be the default owners for everything in the repo.
+
+* @GreptimeTeam/db-approver
+
+## [Module] Database Engine
+/src/index @zhongzc
+/src/mito2 @evenyag @v0y4g3r @waynexia
+/src/query @evenyag
+
+## [Module] Distributed
+/src/common/meta @MichaelScofield
+/src/common/procedure @MichaelScofield
+/src/meta-client @MichaelScofield
+/src/meta-srv @MichaelScofield
+
+## [Module] Write Ahead Log
+/src/log-store @v0y4g3r
+/src/store-api @v0y4g3r
+
+## [Module] Metrics Engine
+/src/metric-engine @waynexia
+/src/promql @waynexia
+
+## [Module] Flow
+/src/flow @zhongzc @waynexia
|
ci
|
add CODEOWNERS file (#3691)
|
43fd87e05170a561febac3e9c96693a4db4c3132
|
2024-02-19 17:13:19
|
Yingwen
|
feat: Defines structs in the merge tree memtable (#3326)
| false
|
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index 1ce3509220e6..99fcc7591976 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -14,9 +14,10 @@
//! Memtables are write buffers for regions.
-pub mod time_series;
-
pub mod key_values;
+#[allow(dead_code)]
+pub mod merge_tree;
+pub mod time_series;
pub(crate) mod version;
use std::fmt;
diff --git a/src/mito2/src/memtable/merge_tree.rs b/src/mito2/src/memtable/merge_tree.rs
new file mode 100644
index 000000000000..6e7c0329b418
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree.rs
@@ -0,0 +1,261 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Memtable implementation based on a merge tree.
+
+mod data;
+mod dict;
+mod metrics;
+mod partition;
+mod shard;
+mod shard_builder;
+mod tree;
+
+use std::fmt;
+use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
+use std::sync::Arc;
+
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::ColumnId;
+use table::predicate::Predicate;
+
+use crate::error::Result;
+use crate::flush::WriteBufferManagerRef;
+use crate::memtable::merge_tree::metrics::WriteMetrics;
+use crate::memtable::merge_tree::tree::MergeTree;
+use crate::memtable::{
+ AllocTracker, BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId,
+ MemtableRef, MemtableStats,
+};
+
+/// Id of a shard, only unique inside a partition.
+type ShardId = u32;
+/// Index of a primary key in a shard.
+type PkIndex = u16;
+/// Id of a primary key inside a tree.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+struct PkId {
+ shard_id: ShardId,
+ pk_index: PkIndex,
+}
+
+/// Config for the merge tree memtable.
+#[derive(Debug, Clone)]
+pub struct MergeTreeConfig {
+ /// Max keys in an index shard.
+ pub index_max_keys_per_shard: usize,
+ /// Number of rows to freeze a data part.
+ pub data_freeze_threshold: usize,
+}
+
+impl Default for MergeTreeConfig {
+ fn default() -> Self {
+ Self {
+ index_max_keys_per_shard: 8192,
+ data_freeze_threshold: 102400,
+ }
+ }
+}
+
+/// Memtable based on a merge tree.
+pub struct MergeTreeMemtable {
+ id: MemtableId,
+ tree: MergeTree,
+ alloc_tracker: AllocTracker,
+ max_timestamp: AtomicI64,
+ min_timestamp: AtomicI64,
+}
+
+impl fmt::Debug for MergeTreeMemtable {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MergeTreeMemtable")
+ .field("id", &self.id)
+ .finish()
+ }
+}
+
+impl Memtable for MergeTreeMemtable {
+ fn id(&self) -> MemtableId {
+ self.id
+ }
+
+ fn write(&self, kvs: &KeyValues) -> Result<()> {
+ // TODO(yingwen): Validate schema while inserting rows.
+
+ let mut metrics = WriteMetrics::default();
+ let res = self.tree.write(kvs, &mut metrics);
+
+ self.update_stats(&metrics);
+
+ res
+ }
+
+ fn iter(
+ &self,
+ _projection: Option<&[ColumnId]>,
+ _predicate: Option<Predicate>,
+ ) -> BoxedBatchIterator {
+ // FIXME(yingwen): Change return value to `Result<BoxedBatchIterator>`.
+ todo!()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.tree.is_empty()
+ }
+
+ fn mark_immutable(&self) {
+ self.alloc_tracker.done_allocating();
+ }
+
+ fn stats(&self) -> MemtableStats {
+ let estimated_bytes = self.alloc_tracker.bytes_allocated();
+
+ if estimated_bytes == 0 {
+ // no rows ever written
+ return MemtableStats {
+ estimated_bytes,
+ time_range: None,
+ };
+ }
+
+ let ts_type = self
+ .tree
+ .metadata
+ .time_index_column()
+ .column_schema
+ .data_type
+ .clone()
+ .as_timestamp()
+ .expect("Timestamp column must have timestamp type");
+ let max_timestamp = ts_type.create_timestamp(self.max_timestamp.load(Ordering::Relaxed));
+ let min_timestamp = ts_type.create_timestamp(self.min_timestamp.load(Ordering::Relaxed));
+ MemtableStats {
+ estimated_bytes,
+ time_range: Some((min_timestamp, max_timestamp)),
+ }
+ }
+}
+
+impl MergeTreeMemtable {
+ /// Returns a new memtable.
+ pub fn new(
+ id: MemtableId,
+ metadata: RegionMetadataRef,
+ write_buffer_manager: Option<WriteBufferManagerRef>,
+ config: &MergeTreeConfig,
+ ) -> Self {
+ Self::with_tree(id, MergeTree::new(metadata, config), write_buffer_manager)
+ }
+
+ /// Creates a mutable memtable from the tree.
+ ///
+ /// It also adds the bytes used by shared parts (e.g. index) to the memory usage.
+ fn with_tree(
+ id: MemtableId,
+ tree: MergeTree,
+ write_buffer_manager: Option<WriteBufferManagerRef>,
+ ) -> Self {
+ let alloc_tracker = AllocTracker::new(write_buffer_manager);
+ // Track space allocated by the tree.
+ let allocated = tree.shared_memory_size();
+ // Here we still add the bytes of shared parts to the tracker as the old memtable
+ // will release its tracker soon.
+ alloc_tracker.on_allocation(allocated);
+
+ Self {
+ id,
+ tree,
+ alloc_tracker,
+ max_timestamp: AtomicI64::new(i64::MIN),
+ min_timestamp: AtomicI64::new(i64::MAX),
+ }
+ }
+
+ /// Updates stats of the memtable.
+ fn update_stats(&self, metrics: &WriteMetrics) {
+ self.alloc_tracker
+ .on_allocation(metrics.key_bytes + metrics.value_bytes);
+
+ loop {
+ let current_min = self.min_timestamp.load(Ordering::Relaxed);
+ if metrics.min_ts >= current_min {
+ break;
+ }
+
+ let Err(updated) = self.min_timestamp.compare_exchange(
+ current_min,
+ metrics.min_ts,
+ Ordering::Relaxed,
+ Ordering::Relaxed,
+ ) else {
+ break;
+ };
+
+ if updated == metrics.min_ts {
+ break;
+ }
+ }
+
+ loop {
+ let current_max = self.max_timestamp.load(Ordering::Relaxed);
+ if metrics.max_ts <= current_max {
+ break;
+ }
+
+ let Err(updated) = self.max_timestamp.compare_exchange(
+ current_max,
+ metrics.max_ts,
+ Ordering::Relaxed,
+ Ordering::Relaxed,
+ ) else {
+ break;
+ };
+
+ if updated == metrics.max_ts {
+ break;
+ }
+ }
+ }
+}
+
+/// Builder to build a [MergeTreeMemtable].
+#[derive(Debug, Default)]
+pub struct MergeTreeMemtableBuilder {
+ id: AtomicU32,
+ write_buffer_manager: Option<WriteBufferManagerRef>,
+ config: MergeTreeConfig,
+}
+
+impl MergeTreeMemtableBuilder {
+ /// Creates a new builder with specific `write_buffer_manager`.
+ pub fn new(write_buffer_manager: Option<WriteBufferManagerRef>) -> Self {
+ Self {
+ id: AtomicU32::new(0),
+ write_buffer_manager,
+ config: MergeTreeConfig::default(),
+ }
+ }
+}
+
+impl MemtableBuilder for MergeTreeMemtableBuilder {
+ fn build(&self, metadata: &RegionMetadataRef) -> MemtableRef {
+ let id = self.id.fetch_add(1, Ordering::Relaxed);
+ Arc::new(MergeTreeMemtable::new(
+ id,
+ metadata.clone(),
+ self.write_buffer_manager.clone(),
+ &self.config,
+ ))
+ }
+}
diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs
new file mode 100644
index 000000000000..3f2627e9d46c
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/data.rs
@@ -0,0 +1,21 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Data part of a shard.
+
+/// Buffer to store columns not in the primary key.
+pub struct DataBuffer {}
+
+/// Data parts under a shard.
+pub struct DataParts {}
diff --git a/src/mito2/src/memtable/merge_tree/dict.rs b/src/mito2/src/memtable/merge_tree/dict.rs
new file mode 100644
index 000000000000..d8e2ba8712ac
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/dict.rs
@@ -0,0 +1,28 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Key dictionary of a shard.
+
+use std::sync::Arc;
+
+/// Builder to build a key dictionary.
+pub struct KeyDictBuilder {}
+
+/// Buffer to store unsorted primary keys.
+pub struct KeyBuffer {}
+
+/// A key dictionary.
+pub struct KeyDict {}
+
+pub type KeyDictRef = Arc<KeyDict>;
diff --git a/src/mito2/src/memtable/merge_tree/metrics.rs b/src/mito2/src/memtable/merge_tree/metrics.rs
new file mode 100644
index 000000000000..7a2e37359a5b
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/metrics.rs
@@ -0,0 +1,38 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Internal metrics of the memtable.
+
+/// Metrics of writing the merge tree.
+pub struct WriteMetrics {
+ /// Size allocated by keys.
+ pub key_bytes: usize,
+ /// Size allocated by values.
+ pub value_bytes: usize,
+ /// Minimum timestamp.
+ pub min_ts: i64,
+ /// Maximum timestamp
+ pub max_ts: i64,
+}
+
+impl Default for WriteMetrics {
+ fn default() -> Self {
+ Self {
+ key_bytes: 0,
+ value_bytes: 0,
+ min_ts: i64::MAX,
+ max_ts: i64::MIN,
+ }
+ }
+}
diff --git a/src/mito2/src/memtable/merge_tree/partition.rs b/src/mito2/src/memtable/merge_tree/partition.rs
new file mode 100644
index 000000000000..0a5921c0ca5c
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/partition.rs
@@ -0,0 +1,42 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Partition of a merge tree.
+//!
+//! We only support partitioning the tree by pre-defined internal columns.
+
+use std::sync::{Arc, RwLock};
+
+use crate::memtable::merge_tree::shard::Shard;
+use crate::memtable::merge_tree::shard_builder::ShardBuilder;
+use crate::memtable::merge_tree::ShardId;
+
+/// Key of a partition.
+pub type PartitionKey = u32;
+
+/// A tree partition.
+pub struct Partition {
+ inner: RwLock<Inner>,
+}
+
+pub type PartitionRef = Arc<Partition>;
+
+/// Inner struct of the partition.
+struct Inner {
+ /// Shard whose dictionary is active.
+ shard_builder: ShardBuilder,
+ next_shard_id: ShardId,
+ /// Shards with frozen dictionary.
+ shards: Vec<Shard>,
+}
diff --git a/src/mito2/src/memtable/merge_tree/shard.rs b/src/mito2/src/memtable/merge_tree/shard.rs
new file mode 100644
index 000000000000..d7fb74b6bafb
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/shard.rs
@@ -0,0 +1,28 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Shard in a partition.
+
+use crate::memtable::merge_tree::data::DataParts;
+use crate::memtable::merge_tree::dict::KeyDictRef;
+use crate::memtable::merge_tree::ShardId;
+
+/// Shard stores data related to the same key dictionary.
+pub struct Shard {
+ shard_id: ShardId,
+ /// Key dictionary of the shard. `None` if the schema of the tree doesn't have a primary key.
+ key_dict: Option<KeyDictRef>,
+ /// Data in the shard.
+ data_parts: DataParts,
+}
diff --git a/src/mito2/src/memtable/merge_tree/shard_builder.rs b/src/mito2/src/memtable/merge_tree/shard_builder.rs
new file mode 100644
index 000000000000..a66366204989
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/shard_builder.rs
@@ -0,0 +1,27 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Builder of a shard.
+
+use crate::memtable::merge_tree::data::DataBuffer;
+use crate::memtable::merge_tree::dict::KeyDictBuilder;
+
+/// Builder to write keys and data to a shard whose key dictionary
+/// is still active.
+pub struct ShardBuilder {
+ /// Builder for the key dictionary.
+ dict_builder: KeyDictBuilder,
+ /// Buffer to store data.
+ data_buffer: DataBuffer,
+}
diff --git a/src/mito2/src/memtable/merge_tree/tree.rs b/src/mito2/src/memtable/merge_tree/tree.rs
new file mode 100644
index 000000000000..39b6fbea9887
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/tree.rs
@@ -0,0 +1,101 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of the merge tree.
+
+use std::collections::BTreeMap;
+use std::sync::{Arc, RwLock};
+
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::ColumnId;
+use table::predicate::Predicate;
+
+use crate::error::Result;
+use crate::memtable::merge_tree::metrics::WriteMetrics;
+use crate::memtable::merge_tree::partition::{PartitionKey, PartitionRef};
+use crate::memtable::merge_tree::MergeTreeConfig;
+use crate::memtable::{BoxedBatchIterator, KeyValues};
+use crate::row_converter::{McmpRowCodec, SortField};
+
+/// The merge tree.
+pub struct MergeTree {
+ /// Config of the tree.
+ config: MergeTreeConfig,
+ /// Metadata of the region.
+ pub(crate) metadata: RegionMetadataRef,
+ /// Primary key codec.
+ row_codec: Arc<McmpRowCodec>,
+ /// Partitions in the tree.
+ partitions: RwLock<BTreeMap<PartitionKey, PartitionRef>>,
+}
+
+impl MergeTree {
+ /// Creates a new merge tree.
+ pub fn new(metadata: RegionMetadataRef, config: &MergeTreeConfig) -> MergeTree {
+ let row_codec = McmpRowCodec::new(
+ metadata
+ .primary_key_columns()
+ .map(|c| SortField::new(c.column_schema.data_type.clone()))
+ .collect(),
+ );
+
+ MergeTree {
+ config: config.clone(),
+ metadata,
+ row_codec: Arc::new(row_codec),
+ partitions: Default::default(),
+ }
+ }
+
+ // TODO(yingwen): The size computed from values is inaccurate.
+ /// Write key-values into the tree.
+ ///
+ /// # Panics
+ /// Panics if the tree is immutable (frozen).
+ pub fn write(&self, _kvs: &KeyValues, _metrics: &mut WriteMetrics) -> Result<()> {
+ todo!()
+ }
+
+ /// Scans the tree.
+ pub fn scan(
+ &self,
+ _projection: Option<&[ColumnId]>,
+ _predicate: Option<Predicate>,
+ ) -> Result<BoxedBatchIterator> {
+ todo!()
+ }
+
+ /// Returns true if the tree is empty.
+ pub fn is_empty(&self) -> bool {
+ todo!()
+ }
+
+ /// Marks the tree as immutable.
+ ///
+ /// Once the tree becomes immutable, callers should not write to it again.
+ pub fn freeze(&self) -> Result<()> {
+ todo!()
+ }
+
+ /// Forks an immutable tree. Returns a mutable tree that inherits the index
+ /// of this tree.
+ pub fn fork(&self, _metadata: RegionMetadataRef) -> MergeTree {
+ todo!()
+ }
+
+ /// Returns the memory size shared by forked trees.
+ pub fn shared_memory_size(&self) -> usize {
+ todo!()
+ }
+}
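
The `update_stats` method above keeps per-memtable min/max timestamps with compare-exchange loops rather than a lock. A minimal sketch of that retry pattern in isolation, using only std atomics and illustrative names (not code from the commit): bail out if the stored minimum is already smaller, otherwise try to install the candidate and retry on contention.

use std::sync::atomic::{AtomicI64, Ordering};

// Lock-free "record the smallest timestamp seen so far", mirroring the shape of
// `update_stats` in the diff above (the max side is symmetric).
fn observe_min(min_ts: &AtomicI64, candidate: i64) {
    loop {
        let current = min_ts.load(Ordering::Relaxed);
        if candidate >= current {
            return; // another writer already recorded an equal or smaller timestamp
        }
        // Try to install `candidate`; on failure the value changed underneath us,
        // so loop and re-check against the fresh minimum.
        if min_ts
            .compare_exchange(current, candidate, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
    }
}

fn main() {
    let min_ts = AtomicI64::new(i64::MAX); // same sentinel as WriteMetrics::default
    observe_min(&min_ts, 42);
    observe_min(&min_ts, 100);
    assert_eq!(42, min_ts.load(Ordering::Relaxed));
}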
|
feat
|
Defines structs in the merge tree memtable (#3326)
|
9e1af79637fff334a13682882635ae7631ec1380
|
2024-05-22 12:20:12
|
shuiyisong
|
chore: add ttl to write_cache (#4010)
| false
|
diff --git a/config/config.md b/config/config.md
index fa58b074645b..b0c95f4e2ada 100644
--- a/config/config.md
+++ b/config/config.md
@@ -102,6 +102,10 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
+| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
+| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
+| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -350,6 +354,10 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
+| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
+| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
+| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 55e0c3638379..d1849048778c 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -324,6 +324,18 @@ vector_cache_size = "512MB"
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB"
+## Whether to enable the experimental write cache.
+enable_experimental_write_cache = false
+
+## File system path for write cache, defaults to `{data_home}/write_cache`.
+experimental_write_cache_path = ""
+
+## Capacity for write cache.
+experimental_write_cache_size = "512MB"
+
+## TTL for write cache.
+experimental_write_cache_ttl = "1h"
+
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 2ed47506b6cc..7f3daedb46d7 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -367,6 +367,18 @@ vector_cache_size = "512MB"
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB"
+## Whether to enable the experimental write cache.
+enable_experimental_write_cache = false
+
+## File system path for write cache, defaults to `{data_home}/write_cache`.
+experimental_write_cache_path = ""
+
+## Capacity for write cache.
+experimental_write_cache_size = "512MB"
+
+## TTL for write cache.
+experimental_write_cache_ttl = "1h"
+
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs
index 890d0b773975..0afbf5b6695e 100644
--- a/src/mito2/src/cache/file_cache.rs
+++ b/src/mito2/src/cache/file_cache.rs
@@ -16,7 +16,7 @@
use std::ops::Range;
use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Duration, Instant};
use bytes::Bytes;
use common_base::readable_size::ReadableSize;
@@ -56,9 +56,13 @@ pub(crate) type FileCacheRef = Arc<FileCache>;
impl FileCache {
/// Creates a new file cache.
- pub(crate) fn new(local_store: ObjectStore, capacity: ReadableSize) -> FileCache {
+ pub(crate) fn new(
+ local_store: ObjectStore,
+ capacity: ReadableSize,
+ ttl: Option<Duration>,
+ ) -> FileCache {
let cache_store = local_store.clone();
- let memory_index = Cache::builder()
+ let mut builder = Cache::builder()
.weigher(|_key, value: &IndexValue| -> u32 {
// We only measure space on local store.
value.file_size
@@ -87,8 +91,11 @@ impl FileCache {
}
}
.boxed()
- })
- .build();
+ });
+ if let Some(ttl) = ttl {
+ builder = builder.time_to_idle(ttl);
+ }
+ let memory_index = builder.build();
FileCache {
local_store,
memory_index,
@@ -376,12 +383,52 @@ mod tests {
ObjectStore::new(builder).unwrap().finish()
}
+ #[tokio::test]
+ async fn test_file_cache_ttl() {
+ let dir = create_temp_dir("");
+ let local_store = new_fs_store(dir.path().to_str().unwrap());
+
+ let cache = FileCache::new(
+ local_store.clone(),
+ ReadableSize::mb(10),
+ Some(Duration::from_millis(5)),
+ );
+ let region_id = RegionId::new(2000, 0);
+ let file_id = FileId::random();
+ let key = IndexKey::new(region_id, file_id, FileType::Parquet);
+ let file_path = cache.cache_file_path(key);
+
+ // Get an empty file.
+ assert!(cache.reader(key).await.is_none());
+
+ // Write a file.
+ local_store
+ .write(&file_path, b"hello".as_slice())
+ .await
+ .unwrap();
+
+ // Add to the cache.
+ cache
+ .put(
+ IndexKey::new(region_id, file_id, FileType::Parquet),
+ IndexValue { file_size: 5 },
+ )
+ .await;
+
+ let exist = cache.reader(key).await;
+ assert!(exist.is_some());
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ cache.memory_index.run_pending_tasks().await;
+ let non = cache.reader(key).await;
+ assert!(non.is_none());
+ }
+
#[tokio::test]
async fn test_file_cache_basic() {
let dir = create_temp_dir("");
let local_store = new_fs_store(dir.path().to_str().unwrap());
- let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10));
+ let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10), None);
let region_id = RegionId::new(2000, 0);
let file_id = FileId::random();
let key = IndexKey::new(region_id, file_id, FileType::Parquet);
@@ -430,7 +477,7 @@ mod tests {
let dir = create_temp_dir("");
let local_store = new_fs_store(dir.path().to_str().unwrap());
- let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10));
+ let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10), None);
let region_id = RegionId::new(2000, 0);
let file_id = FileId::random();
let key = IndexKey::new(region_id, file_id, FileType::Parquet);
@@ -462,7 +509,7 @@ mod tests {
async fn test_file_cache_recover() {
let dir = create_temp_dir("");
let local_store = new_fs_store(dir.path().to_str().unwrap());
- let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10));
+ let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10), None);
let region_id = RegionId::new(2000, 0);
let file_type = FileType::Parquet;
@@ -488,7 +535,7 @@ mod tests {
}
// Recover the cache.
- let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10));
+ let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10), None);
// No entry before recovery.
assert!(cache
.reader(IndexKey::new(region_id, file_ids[0], file_type))
@@ -513,7 +560,7 @@ mod tests {
async fn test_file_cache_read_ranges() {
let dir = create_temp_dir("");
let local_store = new_fs_store(dir.path().to_str().unwrap());
- let file_cache = FileCache::new(local_store.clone(), ReadableSize::mb(10));
+ let file_cache = FileCache::new(local_store.clone(), ReadableSize::mb(10), None);
let region_id = RegionId::new(2000, 0);
let file_id = FileId::random();
let key = IndexKey::new(region_id, file_id, FileType::Parquet);
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs
index c13e32e64155..23a84194695d 100644
--- a/src/mito2/src/cache/write_cache.rs
+++ b/src/mito2/src/cache/write_cache.rs
@@ -15,6 +15,7 @@
//! A write-through cache for remote object stores.
use std::sync::Arc;
+use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_telemetry::{debug, info};
@@ -55,9 +56,10 @@ impl WriteCache {
local_store: ObjectStore,
object_store_manager: ObjectStoreManagerRef,
cache_capacity: ReadableSize,
+ ttl: Option<Duration>,
intermediate_manager: IntermediateManager,
) -> Result<Self> {
- let file_cache = FileCache::new(local_store, cache_capacity);
+ let file_cache = FileCache::new(local_store, cache_capacity, ttl);
file_cache.recover().await?;
Ok(Self {
@@ -72,6 +74,7 @@ impl WriteCache {
cache_dir: &str,
object_store_manager: ObjectStoreManagerRef,
cache_capacity: ReadableSize,
+ ttl: Option<Duration>,
intermediate_manager: IntermediateManager,
) -> Result<Self> {
info!("Init write cache on {cache_dir}, capacity: {cache_capacity}");
@@ -81,6 +84,7 @@ impl WriteCache {
local_store,
object_store_manager,
cache_capacity,
+ ttl,
intermediate_manager,
)
.await
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index 60d079c5abae..7d633765a0d9 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -87,6 +87,9 @@ pub struct MitoConfig {
pub experimental_write_cache_path: String,
/// Capacity for write cache.
pub experimental_write_cache_size: ReadableSize,
+ /// TTL for write cache.
+ #[serde(with = "humantime_serde")]
+ pub experimental_write_cache_ttl: Option<Duration>,
// Other configs:
/// Buffer size for SST writing.
@@ -126,6 +129,7 @@ impl Default for MitoConfig {
enable_experimental_write_cache: false,
experimental_write_cache_path: String::new(),
experimental_write_cache_size: ReadableSize::mb(512),
+ experimental_write_cache_ttl: Some(Duration::from_secs(60 * 60)),
sst_write_buffer_size: DEFAULT_WRITE_BUFFER_SIZE,
scan_parallelism: divide_num_cpus(4),
parallel_scan_channel_size: DEFAULT_SCAN_CHANNEL_SIZE,
@@ -228,10 +232,16 @@ impl MitoConfig {
/// Enable experimental write cache.
#[cfg(test)]
- pub fn enable_write_cache(mut self, path: String, size: ReadableSize) -> Self {
+ pub fn enable_write_cache(
+ mut self,
+ path: String,
+ size: ReadableSize,
+ ttl: Option<Duration>,
+ ) -> Self {
self.enable_experimental_write_cache = true;
self.experimental_write_cache_path = path;
self.experimental_write_cache_size = size;
+ self.experimental_write_cache_ttl = ttl;
self
}
}
diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs
index dbe33ff37f89..6d3fac897eda 100644
--- a/src/mito2/src/engine/basic_test.rs
+++ b/src/mito2/src/engine/basic_test.rs
@@ -565,7 +565,7 @@ async fn test_engine_with_write_cache() {
let mut env = TestEnv::new();
let path = env.data_home().to_str().unwrap().to_string();
- let mito_config = MitoConfig::default().enable_write_cache(path, ReadableSize::mb(512));
+ let mito_config = MitoConfig::default().enable_write_cache(path, ReadableSize::mb(512), None);
let engine = env.create_engine(mito_config).await;
let region_id = RegionId::new(1, 1);
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 27a746f3ba6a..78dbd1c3362b 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -377,9 +377,10 @@ impl TestEnv {
.unwrap();
let object_store_manager = self.get_object_store_manager().unwrap();
- let write_cache = WriteCache::new(local_store, object_store_manager, capacity, intm_mgr)
- .await
- .unwrap();
+ let write_cache =
+ WriteCache::new(local_store, object_store_manager, capacity, None, intm_mgr)
+ .await
+ .unwrap();
Arc::new(write_cache)
}
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index ca6eaa6bfe95..7483c73bad31 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -320,6 +320,7 @@ async fn write_cache_from_config(
&config.experimental_write_cache_path,
object_store_manager,
config.experimental_write_cache_size,
+ config.experimental_write_cache_ttl,
intermediate_manager,
)
.await?;
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 14c3e8cac265..49132fbc7bf2 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -814,6 +814,7 @@ auto_flush_interval = "30m"
enable_experimental_write_cache = false
experimental_write_cache_path = ""
experimental_write_cache_size = "512MiB"
+experimental_write_cache_ttl = "1h"
sst_write_buffer_size = "8MiB"
parallel_scan_channel_size = 32
allow_stale_entries = false
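
The heart of this change is threading an optional TTL into the file cache's in-memory index so idle entries expire on their own. A minimal sketch of that shape, assuming the moka crate the cache is built on (with its `sync` feature); the key/value types and the `build_index` name are placeholders, not the engine's API.

use std::time::Duration;

use moka::sync::Cache; // assumes moka with the "sync" feature enabled

// Build an in-memory index whose entries expire after being idle for `ttl`,
// mirroring the `if let Some(ttl) = ttl { builder = builder.time_to_idle(ttl) }`
// shape in the diff above.
fn build_index(ttl: Option<Duration>) -> Cache<String, u32> {
    let mut builder = Cache::builder().max_capacity(10_000);
    if let Some(ttl) = ttl {
        builder = builder.time_to_idle(ttl);
    }
    builder.build()
}

fn main() {
    let index = build_index(Some(Duration::from_secs(3600)));
    index.insert("region-1/file-a.parquet".to_string(), 5);
    assert_eq!(Some(5), index.get("region-1/file-a.parquet"));
}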
|
chore
|
add ttl to write_cache (#4010)
|
9718aa17c90c279dee8e1e02f28983551f50a75a
|
2023-08-04 14:38:07
|
Ruihang Xia
|
feat: define region group and sequence (#2100)
| false
|
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index d2d0be501720..41b41d96d156 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -469,6 +469,9 @@ pub enum Error {
source: SendError<Message>,
location: Location,
},
+
+ #[snafu(display("Too many partitions, location: {}", location))]
+ TooManyPartitions { location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -525,7 +528,8 @@ impl ErrorExt for Error {
| Error::ParseNum { .. }
| Error::UnsupportedSelectorType { .. }
| Error::InvalidArguments { .. }
- | Error::InvalidHeartbeatRequest { .. } => StatusCode::InvalidArguments,
+ | Error::InvalidHeartbeatRequest { .. }
+ | Error::TooManyPartitions { .. } => StatusCode::InvalidArguments,
Error::LeaseKeyFromUtf8 { .. }
| Error::LeaseValueFromUtf8 { .. }
| Error::StatKeyFromUtf8 { .. }
diff --git a/src/meta-srv/src/service/ddl.rs b/src/meta-srv/src/service/ddl.rs
index d1f560de915e..faf840084a74 100644
--- a/src/meta-srv/src/service/ddl.rs
+++ b/src/meta-srv/src/service/ddl.rs
@@ -24,14 +24,15 @@ use common_meta::rpc::ddl::{
use common_meta::rpc::router;
use common_meta::table_name::TableName;
use common_telemetry::{info, warn};
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
+use store_api::storage::MAX_REGION_SEQ;
use table::metadata::RawTableInfo;
use tonic::{Request, Response};
use super::store::kv::KvStoreRef;
use super::GrpcResult;
use crate::ddl::DdlManagerRef;
-use crate::error::{self, Result, TableMetadataManagerSnafu};
+use crate::error::{self, Result, TableMetadataManagerSnafu, TooManyPartitionsSnafu};
use crate::metasrv::{MetaSrv, SelectorContext, SelectorRef};
use crate::sequence::SequenceRef;
use crate::table_routes::get_table_route_value;
@@ -192,6 +193,10 @@ async fn handle_create_table_route(
..Default::default()
};
+ ensure!(
+ partitions.len() <= MAX_REGION_SEQ as usize,
+ TooManyPartitionsSnafu
+ );
let region_routes = partitions
.into_iter()
.enumerate()
diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs
index efa87ed1dea7..de0ce1739f95 100644
--- a/src/store-api/src/storage/descriptors.rs
+++ b/src/store-api/src/storage/descriptors.rs
@@ -19,16 +19,36 @@ use serde::{Deserialize, Serialize};
use crate::storage::{consts, ColumnDefaultConstraint, ColumnSchema, ConcreteDataType};
-/// Id of column, unique in each region.
+/// Id of column. Unique in each region.
pub type ColumnId = u32;
-/// Id of column family, unique in each region.
+/// Id of column family. Unique in each region.
pub type ColumnFamilyId = u32;
-/// Sequence number of regions under the same table.
+/// Group number of one region. Unique in each region.
+pub type RegionGroup = u8;
+/// Sequence number of region inside one table. Unique in each table.
+/// The first 8 bits are preserved for [RegionGroup].
+pub type RegionSeq = u32;
+/// Id of regions under the same table. Unique in each table.
+/// Is composed by [RegionGroup] and [RegionSeq].
pub type RegionNumber = u32;
-/// Id of table.
+/// Id of table. Universal unique.
pub type TableId = u32;
-/// Id of the region. It's generated by concatenating table id and region number.
+const REGION_GROUP_MASK: u32 = 0b1111_1111 << 24;
+const REGION_SEQ_MASK: u32 = (0b1 << 24) - 1;
+
+/// The max valid region sequence number.
+pub const MAX_REGION_SEQ: u32 = REGION_SEQ_MASK;
+
+/// Id of the region. It's generated by concatenating table id, region group and region number.
+///
+/// ```plaintext
+/// 63 31 23 0
+/// ┌────────────────────────────────────┬──────────┬──────────────────┐
+/// │ Table Id(32) │ Group(8) │ Sequence(24) │
+/// └────────────────────────────────────┴──────────┴──────────────────┘
+/// Region Number(32)
+/// ```
#[derive(Default, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct RegionId(u64);
@@ -48,6 +68,16 @@ impl RegionId {
self.0 as RegionNumber
}
+ /// Returns the group number of the region
+ pub const fn region_group(&self) -> RegionGroup {
+ ((self.region_number() & REGION_GROUP_MASK) >> 24) as RegionGroup
+ }
+
+ /// Return the sequence number of the region
+ pub const fn region_sequence(&self) -> RegionSeq {
+ self.region_number() & REGION_SEQ_MASK
+ }
+
/// Returns the region id as u64.
pub const fn as_u64(&self) -> u64 {
self.0
@@ -57,6 +87,19 @@ impl RegionId {
pub const fn from_u64(id: u64) -> RegionId {
RegionId(id)
}
+
+ #[cfg(test)]
+ pub const fn with_group_and_seq(
+ table_id: TableId,
+ group: RegionGroup,
+ seq: RegionSeq,
+ ) -> RegionId {
+ RegionId(
+ ((table_id as u64) << 32)
+ | ((group as u32) << 24) as u64
+ | (seq & REGION_SEQ_MASK) as u64,
+ )
+ }
}
impl fmt::Debug for RegionId {
@@ -411,4 +454,22 @@ mod tests {
let parsed: RegionId = serde_json::from_str(&json).unwrap();
assert_eq!(region_id, parsed);
}
+
+ #[test]
+ fn test_retrieve_region_group_and_seq() {
+ let region_id = RegionId::with_group_and_seq(111, 222, 333);
+ assert_eq!(111, region_id.table_id());
+ assert_eq!(222, region_id.region_group());
+ assert_eq!(333, region_id.region_sequence());
+
+ let expected_region_number = 222 << 24 | 333;
+ assert_eq!(expected_region_number, region_id.region_number());
+ }
+
+ #[test]
+ fn test_invalid_large_region_sequence() {
+ // region sequence larger than `MAX_REGION_SEQ` will be masked into valid range
+ let region_id = RegionId::with_group_and_seq(111, 222, u32::MAX);
+ assert_eq!(MAX_REGION_SEQ, region_id.region_sequence());
+ }
}
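
A self-contained sketch of the bit layout this commit introduces: the 64-bit region id keeps the 32-bit table id in the high half, while the low 32 bits (the region number) split into an 8-bit group and a 24-bit sequence. The masks mirror those in the diff; `pack` and `unpack` are illustrative names rather than codebase APIs.

// Sketch of the RegionId packing described above; the constants mirror
// REGION_GROUP_MASK / REGION_SEQ_MASK from the diff.
const GROUP_MASK: u32 = 0b1111_1111 << 24;
const SEQ_MASK: u32 = (0b1 << 24) - 1;

fn pack(table_id: u32, group: u8, seq: u32) -> u64 {
    ((table_id as u64) << 32) | (((group as u32) << 24) as u64) | ((seq & SEQ_MASK) as u64)
}

fn unpack(region_id: u64) -> (u32, u8, u32) {
    let region_number = region_id as u32; // low 32 bits: group(8) + sequence(24)
    let table_id = (region_id >> 32) as u32;
    let group = ((region_number & GROUP_MASK) >> 24) as u8;
    let seq = region_number & SEQ_MASK;
    (table_id, group, seq)
}

fn main() {
    assert_eq!((111, 222, 333), unpack(pack(111, 222, 333)));
    // Sequences above the 24-bit range are masked, as the test in the diff demonstrates.
    assert_eq!(SEQ_MASK, unpack(pack(111, 222, u32::MAX)).2);
}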
|
feat
|
define region group and sequence (#2100)
|
65f5349767704139678f32a44824bdeb3441a9e7
|
2023-07-10 17:55:33
|
Yingwen
|
feat(mito2): Define basic structs for MitoEngine (#1928)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 3d9eccf167f0..59daaa6ea897 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -228,6 +228,20 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
+[[package]]
+name = "aquamarine"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1"
+dependencies = [
+ "include_dir",
+ "itertools",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
[[package]]
name = "arc-swap"
version = "1.6.0"
@@ -4428,6 +4442,25 @@ dependencies = [
"hashbrown 0.12.3",
]
+[[package]]
+name = "include_dir"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e"
+dependencies = [
+ "include_dir_macros",
+]
+
+[[package]]
+name = "include_dir_macros"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
[[package]]
name = "indexmap"
version = "1.9.3"
@@ -5397,6 +5430,13 @@ dependencies = [
[[package]]
name = "mito2"
version = "0.3.2"
+dependencies = [
+ "aquamarine",
+ "common-error",
+ "object-store",
+ "snafu",
+ "store-api",
+]
[[package]]
name = "moka"
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 7eba8be440c6..b1b0e55e37ca 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -5,3 +5,8 @@ edition.workspace = true
license.workspace = true
[dependencies]
+aquamarine = "0.3"
+common-error = { path = "../common/error" }
+object-store = { path = "../object-store" }
+snafu.workspace = true
+store-api = { path = "../store-api" }
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
new file mode 100644
index 000000000000..87c25cd3d31c
--- /dev/null
+++ b/src/mito2/src/config.rs
@@ -0,0 +1,19 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Configurations.
+
+/// Configuration for [MitoEngine](crate::engine::MitoEngine).
+#[derive(Debug)]
+pub struct MitoConfig {}
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 51fea657cc85..c5e66af2b517 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -12,6 +12,39 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
+use object_store::ObjectStore;
+
+use crate::config::MitoConfig;
+use crate::worker::WorkerGroup;
+
/// Region engine implementation for timeseries data.
#[derive(Clone)]
-pub struct MitoEngine {}
+pub struct MitoEngine {
+ inner: Arc<EngineInner>,
+}
+
+impl MitoEngine {
+ /// Returns a new [MitoEngine] with specific `config`, `log_store` and `object_store`.
+ pub fn new<S>(config: MitoConfig, log_store: S, object_store: ObjectStore) -> MitoEngine {
+ MitoEngine {
+ inner: Arc::new(EngineInner::new(config, log_store, object_store)),
+ }
+ }
+}
+
+/// Inner struct of [MitoEngine].
+struct EngineInner {
+ /// Region workers group.
+ workers: WorkerGroup,
+}
+
+impl EngineInner {
+ /// Returns a new [EngineInner] with specific `config`, `log_store` and `object_store`.
+ fn new<S>(_config: MitoConfig, _log_store: S, _object_store: ObjectStore) -> EngineInner {
+ EngineInner {
+ workers: WorkerGroup::default(),
+ }
+ }
+}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
new file mode 100644
index 000000000000..f751c4ccaf8b
--- /dev/null
+++ b/src/mito2/src/error.rs
@@ -0,0 +1,21 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Error of mito engine.
+
+use snafu::Snafu;
+
+#[derive(Debug, Snafu)]
+#[snafu(visibility(pub))]
+pub enum Error {}
diff --git a/src/mito2/src/lib.rs b/src/mito2/src/lib.rs
index 8f0d0d0f9a8b..4636c996276d 100644
--- a/src/mito2/src/lib.rs
+++ b/src/mito2/src/lib.rs
@@ -12,4 +12,178 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//! # Mito
+//!
+//! Mito is a region engine to store timeseries data.
+
+// TODO(yingwen): Remove all `allow(dead_code)` after finish refactoring mito.
+pub mod config;
+#[allow(dead_code)]
pub mod engine;
+pub mod error;
+#[allow(dead_code)]
+mod region;
+#[allow(dead_code)]
+mod worker;
+
+#[cfg_attr(doc, aquamarine::aquamarine)]
+/// # Mito developer document
+///
+/// ## Engine
+///
+/// Engine hierarchy:
+///
+/// ```mermaid
+/// classDiagram
+/// class MitoEngine {
+/// -WorkerGroup workers
+/// }
+/// class MitoRegion {
+/// +VersionControlRef version_control
+/// -RegionId region_id
+/// -String manifest_dir
+/// -AtomicI64 last_flush_millis
+/// +region_id() RegionId
+/// +scan() ChunkReaderImpl
+/// }
+/// class RegionMap {
+/// -HashMap<RegionId, MitoRegionRef> regions
+/// }
+/// class ChunkReaderImpl
+///
+/// class WorkerGroup {
+/// -Vec~RegionWorker~ workers
+/// }
+/// class RegionWorker {
+/// -RegionMap regions
+/// -Sender sender
+/// -JoinHandle handle
+/// }
+/// class RegionWorkerThread~LogStore~ {
+/// -RegionMap regions
+/// -Receiver receiver
+/// -Wal~LogStore~ wal
+/// -ObjectStore object_store
+/// -MemtableBuilderRef memtable_builder
+/// -FlushSchedulerRef~LogStore~ flush_scheduler
+/// -FlushStrategy flush_strategy
+/// -CompactionSchedulerRef~LogStore~ compaction_scheduler
+/// -FilePurgerRef file_purger
+/// }
+/// class Wal~LogStore~ {
+/// -LogStore log_store
+/// }
+/// class MitoConfig
+///
+/// MitoEngine o-- MitoConfig
+/// MitoEngine o-- MitoRegion
+/// MitoEngine o-- WorkerGroup
+/// MitoRegion o-- VersionControl
+/// MitoRegion -- ChunkReaderImpl
+/// WorkerGroup o-- RegionWorker
+/// RegionWorker o-- RegionMap
+/// RegionWorker -- RegionWorkerThread~LogStore~
+/// RegionWorkerThread~LogStore~ o-- RegionMap
+/// RegionWorkerThread~LogStore~ o-- Wal~LogStore~
+/// ```
+///
+/// ## Metadata
+///
+/// Metadata hierarchy:
+///
+/// ```mermaid
+/// classDiagram
+/// class VersionControl {
+/// -CowCell~Version~ version
+/// -AtomicU64 committed_sequence
+/// }
+/// class Version {
+/// -RegionMetadataRef metadata
+/// -MemtableVersionRef memtables
+/// -LevelMetasRef ssts
+/// -SequenceNumber flushed_sequence
+/// -ManifestVersion manifest_version
+/// }
+/// class MemtableVersion {
+/// -MemtableRef mutable
+/// -Vec~MemtableRef~ immutables
+/// +mutable_memtable() MemtableRef
+/// +immutable_memtables() &[MemtableRef]
+/// +freeze_mutable(MemtableRef new_mutable) MemtableVersion
+/// }
+/// class LevelMetas {
+/// -LevelMetaVec levels
+/// -AccessLayerRef sst_layer
+/// -FilePurgerRef file_purger
+/// -Option~i64~ compaction_time_window
+/// }
+/// class LevelMeta {
+/// -Level level
+/// -HashMap<FileId, FileHandle> files
+/// }
+/// class FileHandle {
+/// -FileMeta meta
+/// -bool compacting
+/// -AtomicBool deleted
+/// -AccessLayerRef sst_layer
+/// -FilePurgerRef file_purger
+/// }
+/// class FileMeta {
+/// +RegionId region_id
+/// +FileId file_id
+/// +Option<Timestamp, Timestamp> time_range
+/// +Level level
+/// +u64 file_size
+/// }
+///
+/// VersionControl o-- Version
+/// Version o-- RegionMetadata
+/// Version o-- MemtableVersion
+/// Version o-- LevelMetas
+/// LevelMetas o-- LevelMeta
+/// LevelMeta o-- FileHandle
+/// FileHandle o-- FileMeta
+///
+/// class RegionMetadata {
+/// +RegionId region_id
+/// +VersionNumber version
+/// +SchemaRef table_schema
+/// +Vec~usize~ primary_key_indices
+/// +Vec~usize~ value_indices
+/// +ColumnId next_column_id
+/// +TableOptions region_options
+/// +DateTime~Utc~ created_on
+/// +RegionSchemaRef region_schema
+/// }
+/// class RegionSchema {
+/// -SchemaRef user_schema
+/// -StoreSchemaRef store_schema
+/// -ColumnsMetadataRef columns
+/// }
+/// class Schema
+/// class StoreSchema {
+/// -Vec~ColumnMetadata~ columns
+/// -SchemaRef schema
+/// -usize row_key_end
+/// -usize user_column_end
+/// }
+/// class ColumnsMetadata {
+/// -Vec~ColumnMetadata~ columns
+/// -HashMap<String, usize> name_to_col_index
+/// -usize row_key_end
+/// -usize timestamp_key_index
+/// -usize user_column_end
+/// }
+/// class ColumnMetadata
+///
+/// RegionMetadata o-- RegionSchema
+/// RegionMetadata o-- Schema
+/// RegionSchema o-- StoreSchema
+/// RegionSchema o-- Schema
+/// RegionSchema o-- ColumnsMetadata
+/// StoreSchema o-- ColumnsMetadata
+/// StoreSchema o-- Schema
+/// StoreSchema o-- ColumnMetadata
+/// ColumnsMetadata o-- ColumnMetadata
+/// ```
+mod docs {}
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
new file mode 100644
index 000000000000..f2ee4be61dd5
--- /dev/null
+++ b/src/mito2/src/region.rs
@@ -0,0 +1,41 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Mito region.
+
+mod metadata;
+mod version;
+
+use std::collections::HashMap;
+use std::sync::{Arc, RwLock};
+
+use store_api::storage::RegionId;
+
+use crate::region::version::VersionControlRef;
+
+/// Metadata and runtime status of a region.
+#[derive(Debug)]
+pub(crate) struct MitoRegion {
+ version_control: VersionControlRef,
+}
+
+pub(crate) type MitoRegionRef = Arc<MitoRegion>;
+
+/// Regions indexed by ids.
+#[derive(Debug, Default)]
+pub(crate) struct RegionMap {
+ regions: RwLock<HashMap<RegionId, MitoRegionRef>>,
+}
+
+pub(crate) type RegionMapRef = Arc<RegionMap>;
diff --git a/src/mito2/src/region/metadata.rs b/src/mito2/src/region/metadata.rs
new file mode 100644
index 000000000000..ef99d2dcc4b3
--- /dev/null
+++ b/src/mito2/src/region/metadata.rs
@@ -0,0 +1,23 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Metadata of mito regions.
+
+use std::sync::Arc;
+
+/// Static metadata of a region.
+#[derive(Debug)]
+pub(crate) struct RegionMetadata {}
+
+pub(crate) type RegionMetadataRef = Arc<RegionMetadata>;
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
new file mode 100644
index 000000000000..d3205432ec4e
--- /dev/null
+++ b/src/mito2/src/region/version.rs
@@ -0,0 +1,32 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Version control of mito engine.
+//!
+//! Version is an immutable snapshot of region's metadata.
+//!
+//! To read latest data from `VersionControl`, we should
+//! 1. Acquire `Version` from `VersionControl`.
+//! 2. Then acquire last sequence.
+//!
+//! Reason: data may be flushed/compacted and some data with old sequence may be removed
+//! and became invisible between step 1 and 2, so need to acquire version at first.
+
+use std::sync::Arc;
+
+/// Controls version of in memory metadata for a region.
+#[derive(Debug)]
+pub(crate) struct VersionControl {}
+
+pub(crate) type VersionControlRef = Arc<VersionControl>;
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
new file mode 100644
index 000000000000..bbfe075486f9
--- /dev/null
+++ b/src/mito2/src/worker.rs
@@ -0,0 +1,31 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Structs and utilities for writing regions.
+
+use crate::region::RegionMapRef;
+
+/// A fixed size group of [RegionWorker]s.
+///
+/// The group binds each region to a specific [RegionWorker].
+#[derive(Debug, Default)]
+pub(crate) struct WorkerGroup {
+ workers: Vec<RegionWorker>,
+}
+
+/// Worker to write and alter regions bound to it.
+#[derive(Debug, Default)]
+struct RegionWorker {
+ regions: RegionMapRef,
+}
|
feat
|
Define basic structs for MitoEngine (#1928)
|
beb9c0a797a7bc7e73eb6f8810289d8a458f0556
|
2025-02-11 23:11:44
|
localhost
|
chore: set now as timestamp field default value (#5502)
| false
|
diff --git a/src/pipeline/src/etl/transform.rs b/src/pipeline/src/etl/transform.rs
index e3039d6c7ac4..a61444d9458f 100644
--- a/src/pipeline/src/etl/transform.rs
+++ b/src/pipeline/src/etl/transform.rs
@@ -17,6 +17,8 @@ pub mod transformer;
use std::collections::BTreeMap;
+use snafu::OptionExt;
+
use crate::etl::error::{Error, Result};
use crate::etl::transform::index::Index;
use crate::etl::value::Value;
@@ -28,7 +30,6 @@ const TRANSFORM_INDEX: &str = "index";
const TRANSFORM_DEFAULT: &str = "default";
const TRANSFORM_ON_FAILURE: &str = "on_failure";
-use snafu::OptionExt;
pub use transformer::greptime::GreptimeTransformer;
use super::error::{
@@ -37,6 +38,7 @@ use super::error::{
};
use super::field::Fields;
use super::processor::{yaml_new_field, yaml_new_fields, yaml_string};
+use super::value::Timestamp;
pub trait Transformer: std::fmt::Debug + Sized + Send + Sync + 'static {
type Output;
@@ -166,6 +168,14 @@ impl Transform {
pub(crate) fn get_type_matched_default_val(&self) -> &Value {
&self.type_
}
+
+ pub(crate) fn get_default_value_when_data_is_none(&self) -> Option<Value> {
+ if matches!(self.type_, Value::Timestamp(_)) && self.index.is_some_and(|i| i == Index::Time)
+ {
+ return Some(Value::Timestamp(Timestamp::default()));
+ }
+ None
+ }
}
impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
@@ -228,6 +238,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
(_, _) => {
let target = type_.parse_str_value(default_value.to_str_value().as_str())?;
final_default = Some(target);
+ on_failure = Some(OnFailure::Default);
}
}
}
diff --git a/src/pipeline/src/etl/transform/transformer/greptime.rs b/src/pipeline/src/etl/transform/transformer/greptime.rs
index eb8d0f882726..621acc758162 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime.rs
@@ -84,9 +84,8 @@ impl GreptimePipelineParams {
impl GreptimeTransformer {
/// Add a default timestamp column to the transforms
fn add_greptime_timestamp_column(transforms: &mut Transforms) {
- let ns = chrono::Utc::now().timestamp_nanos_opt().unwrap_or(0);
- let type_ = Value::Timestamp(Timestamp::Nanosecond(ns));
- let default = Some(type_.clone());
+ let type_ = Value::Timestamp(Timestamp::Nanosecond(0));
+ let default = None;
let transform = Transform {
fields: Fields::one(Field::new(
@@ -192,9 +191,17 @@ impl Transformer for GreptimeTransformer {
values[output_index] = GreptimeValue { value_data };
}
None => {
- let default = transform.get_default();
- let value_data = match default {
- Some(default) => coerce_value(default, transform)?,
+ let value_data = match transform.on_failure {
+ Some(crate::etl::transform::OnFailure::Default) => {
+ match transform.get_default() {
+ Some(default) => coerce_value(default, transform)?,
+ None => match transform.get_default_value_when_data_is_none() {
+ Some(default) => coerce_value(&default, transform)?,
+ None => None,
+ },
+ }
+ }
+ Some(crate::etl::transform::OnFailure::Ignore) => None,
None => None,
};
values[output_index] = GreptimeValue { value_data };
diff --git a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
index da345b3bdeb3..a796a816ec0b 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
@@ -159,19 +159,7 @@ fn coerce_type(transform: &Transform) -> Result<(ColumnDataType, Option<ColumnDa
pub(crate) fn coerce_value(val: &Value, transform: &Transform) -> Result<Option<ValueData>> {
match val {
- Value::Null => match &transform.default {
- Some(default) => coerce_value(default, transform),
- None => match transform.on_failure {
- Some(OnFailure::Ignore) => Ok(None),
- Some(OnFailure::Default) => transform
- .get_default()
- .map(|default| coerce_value(default, transform))
- .unwrap_or_else(|| {
- coerce_value(transform.get_type_matched_default_val(), transform)
- }),
- None => Ok(None),
- },
- },
+ Value::Null => Ok(None),
Value::Int8(n) => coerce_i64_value(*n as i64, transform),
Value::Int16(n) => coerce_i64_value(*n as i64, transform),
diff --git a/src/pipeline/tests/pipeline.rs b/src/pipeline/tests/pipeline.rs
index 7a170660a99d..3f3a90c55fc1 100644
--- a/src/pipeline/tests/pipeline.rs
+++ b/src/pipeline/tests/pipeline.rs
@@ -785,6 +785,36 @@ transform:
assert_eq!(expected, r);
}
+#[test]
+fn test_timestamp_default_now() {
+ let input_value = serde_json::json!({"abc": "hello world"});
+
+ let pipeline_yaml = r#"
+processors:
+transform:
+ - field: abc
+ type: string
+ on_failure: default
+"#;
+
+ let yaml_content = Content::Yaml(pipeline_yaml);
+ let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
+
+ let mut status = json_to_intermediate_state(input_value).unwrap();
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
+
+ row.values.into_iter().for_each(|v| {
+ if let ValueData::TimestampNanosecondValue(v) = v.value_data.unwrap() {
+ let now = chrono::Utc::now().timestamp_nanos_opt().unwrap();
+ assert!(now - v < 1_000_000);
+ }
+ });
+}
+
#[test]
fn test_dispatch() {
let input_value_str1 = r#"
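
The behavioural point of this commit is that the "now" default for the time index must be taken per transformed row, not once when the pipeline is built, otherwise every later row reuses the stale construction-time timestamp. A minimal sketch of that distinction, using chrono and illustrative names:

use chrono::Utc;

// `None` means "fill with now() per row", mirroring `let default = None` in the diff;
// a `Some` value would behave like the old construction-time default.
struct TimeColumn {
    default_ns: Option<i64>,
}

impl TimeColumn {
    fn value_for_missing_field(&self) -> i64 {
        self.default_ns
            .unwrap_or_else(|| Utc::now().timestamp_nanos_opt().unwrap_or(0))
    }
}

fn main() {
    let col = TimeColumn { default_ns: None };
    let a = col.value_for_missing_field();
    let b = col.value_for_missing_field();
    // Each missing value gets a fresh timestamp rather than a frozen one.
    assert!(b >= a);
}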
|
chore
|
set now as timestamp field default value (#5502)
|
e3a079a1421a9cb2e7c3b51011e4b092665a28f2
|
2023-08-02 19:09:17
|
Vanish
|
fix: session features (#2084)
| false
|
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 08c847e36c2e..730a225e5ebb 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -76,3 +76,4 @@ client = { path = "../client" }
common-test-util = { path = "../common/test-util" }
common-query = { path = "../common/query" }
datafusion-common.workspace = true
+session = { path = "../session", features = ["testing"] }
diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml
index c594b93351e2..2ff4e68017cf 100644
--- a/src/promql/Cargo.toml
+++ b/src/promql/Cargo.toml
@@ -26,3 +26,4 @@ common-telemetry = { path = "../common/telemetry" }
[dev-dependencies]
tokio.workspace = true
query = { path = "../query" }
+session = { path = "../session", features = ["testing"] }
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index 730647da370a..7dec57d258cb 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -76,6 +76,7 @@ log-store = { path = "../log-store" }
mito = { path = "../mito", features = ["test"] }
ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
+session = { path = "../session", features = ["testing"] }
storage = { path = "../storage" }
tokio-test = "0.4"
criterion = { version = "0.4", features = ["html_reports", "async_tokio"] }
|
fix
|
session features (#2084)
|
6d762aa9dcad7f4edb9e1019688e6eb25b14d7d8
|
2022-11-18 12:25:11
|
aievl
|
feat: update mysql default listen port to 4406 (#568)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 6e7e00ab517a..795ad456615c 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -3,7 +3,7 @@ mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
wal_dir = '/tmp/greptimedb/wal'
rpc_runtime_size = 8
-mysql_addr = '127.0.0.1:3306'
+mysql_addr = '127.0.0.1:4406'
mysql_runtime_size = 4
[storage]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index fa7ab8725612..cf981a300f47 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -1,7 +1,7 @@
node_id = 0
mode = 'standalone'
http_addr = '127.0.0.1:4000'
-datanode_mysql_addr = '127.0.0.1:3306'
+datanode_mysql_addr = '127.0.0.1:4406'
datanode_mysql_runtime_size = 4
wal_dir = '/tmp/greptimedb/wal/'
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 20b0c52c1f2f..10136101c933 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -143,7 +143,7 @@ mod tests {
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
- assert_eq!("127.0.0.1:3306".to_string(), options.mysql_addr);
+ assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
let MetaClientOpts {
metasrv_addrs: metasrv_addr,
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 11b522fee5e1..1bb5fc5ed0ed 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -87,7 +87,7 @@ impl Default for StandaloneOptions {
mode: Mode::Standalone,
wal_dir: "/tmp/greptimedb/wal".to_string(),
storage: ObjectStoreConfig::default(),
- datanode_mysql_addr: "127.0.0.1:3306".to_string(),
+ datanode_mysql_addr: "127.0.0.1:4406".to_string(),
datanode_mysql_runtime_size: 4,
}
}
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 26e77abbabac..bcab4856563f 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -56,7 +56,7 @@ impl Default for DatanodeOptions {
node_id: None,
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_runtime_size: 8,
- mysql_addr: "127.0.0.1:3306".to_string(),
+ mysql_addr: "127.0.0.1:4406".to_string(),
mysql_runtime_size: 2,
meta_client_opts: None,
wal_dir: "/tmp/greptimedb/wal".to_string(),
|
feat
|
update mysql default listen port to 4406 (#568)
|
a598008ec3fcc2b7f4df447455852cededa380ab
|
2025-01-16 16:35:46
|
Ruihang Xia
|
fix: panic when received invalid query string (#5366)
| false
|
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index ae4ac70ed6e5..cf5f9a744da2 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -273,8 +273,11 @@ pub(crate) fn check(
) -> Option<Output> {
// INSERT don't need MySQL federated check. We assume the query doesn't contain
// federated or driver setup command if it starts with a 'INSERT' statement.
- if query.len() > 6 && query[..6].eq_ignore_ascii_case("INSERT") {
- return None;
+ let the_6th_index = query.char_indices().nth(6).map(|(i, _)| i);
+ if let Some(index) = the_6th_index {
+ if query[..index].eq_ignore_ascii_case("INSERT") {
+ return None;
+ }
}
// First to check the query is like "select @@variables".
@@ -295,6 +298,15 @@ mod test {
use super::*;
+ #[test]
+ fn test_check_abnormal() {
+ let session = Arc::new(Session::new(None, Channel::Mysql, Default::default()));
+ let query = "🫣一点不正常的东西🫣";
+ let output = check(query, QueryContext::arc(), session.clone());
+
+ assert!(output.is_none());
+ }
+
#[test]
fn test_check() {
let session = Arc::new(Session::new(None, Channel::Mysql, Default::default()));
|
fix
|
panic when received invalid query string (#5366)
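A self-contained sketch of the UTF-8 issue the fix above addresses: slicing a `&str` at a fixed byte offset panics when that offset falls inside a multi-byte character, while `char_indices` only yields valid boundaries. `starts_with_insert` below is an illustrative helper, not the server's actual `check` function.

```rust
/// Returns true if `query` starts with "INSERT" (case-insensitive),
/// without ever slicing inside a multi-byte character.
fn starts_with_insert(query: &str) -> bool {
    // `query[..6]` would panic if byte 6 is not a char boundary,
    // e.g. for a query made of emoji or CJK characters.
    match query.char_indices().nth(6).map(|(i, _)| i) {
        Some(i) => query[..i].eq_ignore_ascii_case("INSERT"),
        // Fewer than 7 characters: cannot be "INSERT" followed by a payload.
        None => false,
    }
}

fn main() {
    assert!(starts_with_insert("INSERT INTO t VALUES (1)"));
    assert!(!starts_with_insert("SELECT 1"));
    // Multi-byte input no longer panics; it simply does not match.
    assert!(!starts_with_insert("🫣一点不正常的东西🫣"));
}
```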
|
43f9c40f43e6c15ab11f8f231913dc15cfc792cc
|
2022-11-08 10:59:32
|
shuiyisong
|
feat: add context to query_handler (#417)
| false
|
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 75779690d23f..bd1036ffbafe 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -37,7 +37,7 @@ impl SubCommand {
}
#[derive(Debug, Parser)]
-struct StartCommand {
+pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
#[clap(long)]
diff --git a/src/servers/src/context.rs b/src/servers/src/context.rs
new file mode 100644
index 000000000000..8d694d5097be
--- /dev/null
+++ b/src/servers/src/context.rs
@@ -0,0 +1,166 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use serde::{Deserialize, Serialize};
+
+use crate::context::AuthMethod::Token;
+use crate::context::Channel::HTTP;
+
+type CtxFnRef = Arc<dyn Fn(&Context) -> bool + Send + Sync>;
+
+#[derive(Default, Serialize, Deserialize)]
+pub struct Context {
+ pub exec_info: ExecInfo,
+ pub client_info: ClientInfo,
+ pub user_info: UserInfo,
+ pub quota: Quota,
+ #[serde(skip)]
+ pub predicates: Vec<CtxFnRef>,
+}
+
+impl Context {
+ pub fn new() -> Self {
+ Context::default()
+ }
+
+ pub fn add_predicate(&mut self, predicate: CtxFnRef) {
+ self.predicates.push(predicate);
+ }
+}
+
+#[derive(Default, Serialize, Deserialize)]
+pub struct ExecInfo {
+ pub catalog: Option<String>,
+ pub schema: Option<String>,
+ // should opts to be thread safe?
+ pub extra_opts: HashMap<String, String>,
+ pub trace_id: Option<String>,
+}
+
+#[derive(Default, Serialize, Deserialize)]
+pub struct ClientInfo {
+ pub client_host: Option<String>,
+}
+
+impl ClientInfo {
+ pub fn new(host: Option<String>) -> Self {
+ ClientInfo { client_host: host }
+ }
+}
+
+#[derive(Default, Serialize, Deserialize)]
+pub struct UserInfo {
+ pub username: Option<String>,
+ pub from_channel: Option<Channel>,
+ pub auth_method: Option<AuthMethod>,
+}
+
+impl UserInfo {
+ pub fn with_http_token(token: String) -> Self {
+ UserInfo {
+ username: None,
+ from_channel: Some(HTTP),
+ auth_method: Some(Token(token)),
+ }
+ }
+}
+
+#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum Channel {
+ GRPC,
+ HTTP,
+ MYSQL,
+}
+
+#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum AuthMethod {
+ None,
+ Password {
+ hash_method: AuthHashMethod,
+ hashed_value: Vec<u8>,
+ },
+ Token(String),
+}
+
+#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum AuthHashMethod {
+ DoubleSha1,
+ Sha256,
+}
+
+#[derive(Default, Serialize, Deserialize)]
+pub struct Quota {
+ pub total: u64,
+ pub consumed: u64,
+ pub estimated: u64,
+}
+
+#[cfg(test)]
+mod test {
+ use std::collections::HashMap;
+ use std::sync::Arc;
+
+ use crate::context::AuthMethod::Token;
+ use crate::context::Channel::HTTP;
+ use crate::context::{ClientInfo, Context, ExecInfo, Quota, UserInfo};
+
+ #[test]
+ fn test_predicate() {
+ let mut ctx = Context::default();
+ ctx.add_predicate(Arc::new(|ctx: &Context| {
+ ctx.quota.total > ctx.quota.consumed
+ }));
+ ctx.quota.total = 10;
+ ctx.quota.consumed = 5;
+
+ let predicates = ctx.predicates.clone();
+ let mut re = true;
+ for predicate in predicates {
+ re &= predicate(&ctx);
+ }
+ assert!(re);
+ }
+
+ #[test]
+ fn test_build() {
+ let ctx = Context {
+ exec_info: ExecInfo {
+ catalog: Some(String::from("greptime")),
+ schema: Some(String::from("public")),
+ extra_opts: HashMap::new(),
+ trace_id: None,
+ },
+ client_info: ClientInfo::new(Some(String::from("127.0.0.1:4001"))),
+ user_info: UserInfo::with_http_token(String::from("HELLO")),
+ quota: Quota {
+ total: 10,
+ consumed: 5,
+ estimated: 2,
+ },
+ predicates: vec![],
+ };
+
+ assert_eq!(ctx.exec_info.catalog.unwrap(), String::from("greptime"));
+ assert_eq!(ctx.exec_info.schema.unwrap(), String::from("public"));
+ assert_eq!(ctx.exec_info.extra_opts.capacity(), 0);
+ assert_eq!(ctx.exec_info.trace_id, None);
+
+ assert_eq!(
+ ctx.client_info.client_host.unwrap(),
+ String::from("127.0.0.1:4001")
+ );
+
+ assert_eq!(ctx.user_info.username, None);
+ assert_eq!(ctx.user_info.from_channel.unwrap(), HTTP);
+ assert_eq!(
+ ctx.user_info.auth_method.unwrap(),
+ Token(String::from("HELLO"))
+ );
+
+ assert!(ctx.quota.total > 0);
+ assert!(ctx.quota.consumed > 0);
+ assert!(ctx.quota.estimated > 0);
+
+ assert_eq!(ctx.predicates.capacity(), 0);
+ }
+}
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index 7d32f5facb12..2f44f5830e66 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -1,5 +1,6 @@
#![feature(assert_matches)]
+pub mod context;
pub mod error;
pub mod grpc;
pub mod http;
|
feat
|
add context to query_handler (#417)
|
7b43f027f922703cd50ef895264fe24f3e21af02
|
2022-11-16 15:46:11
|
Lei, Huang
|
fix: respect node id and metasrv addr in config file (#542)
| false
|
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 692c120b64e1..c77a17b2b431 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -93,31 +93,20 @@ impl TryFrom<StartCommand> for DatanodeOptions {
opts.mysql_addr = addr;
}
- match (cmd.metasrv_addr, cmd.node_id) {
- (Some(meta_addr), Some(node_id)) => {
- // Running mode is only set to Distributed when
- // both metasrv addr and node id are set in
- // commandline options
- opts.meta_client_opts.metasrv_addr = meta_addr.clone();
- opts.node_id = node_id;
- opts.metasrv_addr = Some(vec![meta_addr]);
- opts.mode = Mode::Distributed;
- }
- (None, None) => {
- opts.mode = Mode::Standalone;
- }
- (None, Some(_)) => {
- return MissingConfigSnafu {
- msg: "Missing metasrv address option",
- }
- .fail();
- }
- (Some(_), None) => {
- return MissingConfigSnafu {
- msg: "Missing node id option",
- }
- .fail();
+ if let Some(node_id) = cmd.node_id {
+ opts.node_id = Some(node_id);
+ }
+
+ if let Some(meta_addr) = cmd.metasrv_addr {
+ opts.meta_client_opts.metasrv_addr = meta_addr;
+ opts.mode = Mode::Distributed;
+ }
+
+ if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
+ return MissingConfigSnafu {
+ msg: "Missing node id option",
}
+ .fail();
}
Ok(opts)
}
@@ -198,13 +187,32 @@ mod tests {
config_file: None,
})
.is_err());
- assert!(DatanodeOptions::try_from(StartCommand {
+
+ // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
+ DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
rpc_addr: None,
mysql_addr: None,
metasrv_addr: None,
config_file: None,
})
- .is_err());
+ .unwrap();
+ }
+
+ #[test]
+ fn test_merge_config() {
+ let dn_opts = DatanodeOptions::try_from(StartCommand {
+ node_id: None,
+ rpc_addr: None,
+ mysql_addr: None,
+ metasrv_addr: None,
+ config_file: Some(format!(
+ "{}/../../config/datanode.example.toml",
+ std::env::current_dir().unwrap().as_path().to_str().unwrap()
+ )),
+ })
+ .unwrap();
+ assert_eq!(Some(42), dn_opts.node_id);
+ assert_eq!("1.1.1.1:3002", dn_opts.meta_client_opts.metasrv_addr);
}
}
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 3eb9a967dc11..0d0707adc31c 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -39,7 +39,7 @@ impl Default for ObjectStoreConfig {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DatanodeOptions {
- pub node_id: u64,
+ pub node_id: Option<u64>,
pub rpc_addr: String,
pub rpc_runtime_size: usize,
pub mysql_addr: String,
@@ -48,13 +48,12 @@ pub struct DatanodeOptions {
pub wal_dir: String,
pub storage: ObjectStoreConfig,
pub mode: Mode,
- pub metasrv_addr: Option<Vec<String>>,
}
impl Default for DatanodeOptions {
fn default() -> Self {
Self {
- node_id: 0,
+ node_id: None,
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_runtime_size: 8,
mysql_addr: "127.0.0.1:3306".to_string(),
@@ -63,7 +62,6 @@ impl Default for DatanodeOptions {
wal_dir: "/tmp/greptimedb/wal".to_string(),
storage: ObjectStoreConfig::default(),
mode: Mode::Standalone,
- metasrv_addr: None,
}
}
}
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index d837b7799d41..03e5c0e64fa4 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -280,6 +280,9 @@ pub enum Error {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
+
+ #[snafu(display("Missing node id option in distributed mode"))]
+ MissingNodeId { backtrace: Backtrace },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -344,6 +347,7 @@ impl ErrorExt for Error {
Error::EmptyInsertBatch => StatusCode::InvalidArguments,
Error::TableIdProviderNotFound { .. } => StatusCode::Unsupported,
Error::BumpTableId { source, .. } => source.status_code(),
+ Error::MissingNodeId { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 5095b9e91b52..9cf336b446d4 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -37,7 +37,9 @@ use table_engine::config::EngineConfig as TableEngineConfig;
use table_engine::engine::MitoEngine;
use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
-use crate::error::{self, CatalogSnafu, MetaClientInitSnafu, NewCatalogSnafu, Result};
+use crate::error::{
+ self, CatalogSnafu, MetaClientInitSnafu, MissingNodeIdSnafu, NewCatalogSnafu, Result,
+};
use crate::heartbeat::HeartbeatTask;
use crate::script::ScriptExecutor;
use crate::server::grpc::plan::PhysicalPlanner;
@@ -72,7 +74,11 @@ impl Instance {
let meta_client = match opts.mode {
Mode::Standalone => None,
Mode::Distributed => {
- let meta_client = new_metasrv_client(opts.node_id, &opts.meta_client_opts).await?;
+ let meta_client = new_metasrv_client(
+ opts.node_id.context(MissingNodeIdSnafu)?,
+ &opts.meta_client_opts,
+ )
+ .await?;
Some(Arc::new(meta_client))
}
};
@@ -106,7 +112,7 @@ impl Instance {
Mode::Distributed => {
let catalog = Arc::new(catalog::remote::RemoteCatalogManager::new(
table_engine.clone(),
- opts.node_id,
+ opts.node_id.context(MissingNodeIdSnafu)?,
Arc::new(MetaKvBackend {
client: meta_client.as_ref().unwrap().clone(),
}),
@@ -123,7 +129,7 @@ impl Instance {
let heartbeat_task = match opts.mode {
Mode::Standalone => None,
Mode::Distributed => Some(HeartbeatTask::new(
- opts.node_id, /*node id not set*/
+ opts.node_id.context(MissingNodeIdSnafu)?,
opts.rpc_addr.clone(),
meta_client.as_ref().unwrap().clone(),
)),
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index d0ed8f62272e..edbfe09c870e 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -91,7 +91,7 @@ impl Instance {
pub async fn with_mock_meta_server(opts: &DatanodeOptions, meta_srv: MockInfo) -> Result<Self> {
let object_store = new_object_store(&opts.storage).await?;
let log_store = create_local_file_log_store(opts).await?;
- let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id).await);
+ let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
let table_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
EngineImpl::new(
@@ -105,7 +105,7 @@ impl Instance {
// create remote catalog manager
let catalog_manager = Arc::new(catalog::remote::RemoteCatalogManager::new(
table_engine.clone(),
- opts.node_id,
+ opts.node_id.unwrap_or(42),
Arc::new(MetaKvBackend {
client: meta_client.clone(),
}),
@@ -116,8 +116,11 @@ impl Instance {
let script_executor =
ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?;
- let heartbeat_task =
- HeartbeatTask::new(opts.node_id, opts.rpc_addr.clone(), meta_client.clone());
+ let heartbeat_task = HeartbeatTask::new(
+ opts.node_id.unwrap_or(42),
+ opts.rpc_addr.clone(),
+ meta_client.clone(),
+ );
Ok(Self {
query_engine: query_engine.clone(),
sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()),
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index da41bbe46cc0..903f44032a55 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -944,7 +944,7 @@ mod test {
let data_tmp_dir =
TempDir::new_in("/tmp", &format!("dist_table_test-data-{}", current)).unwrap();
let opts = DatanodeOptions {
- node_id: datanode_id,
+ node_id: Some(datanode_id),
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
storage: ObjectStoreConfig::File {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
|
fix
|
respect node id and metasrv addr in config file (#542)
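A minimal sketch (assuming the `snafu` crate) of the pattern introduced above: `node_id` becomes optional and is only required in distributed mode, where `OptionExt::context` converts a missing value into a typed error. The `Options` struct and `resolve_node_id` helper are illustrative stand-ins, not the datanode's real types.

```rust
use snafu::{OptionExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Missing node id option in distributed mode"))]
    MissingNodeId,
}

enum Mode {
    Standalone,
    Distributed,
}

struct Options {
    mode: Mode,
    node_id: Option<u64>,
}

/// Resolve a node id, failing only when distributed mode actually needs one.
fn resolve_node_id(opts: &Options) -> Result<Option<u64>, Error> {
    match opts.mode {
        Mode::Standalone => Ok(opts.node_id),
        // `context` turns `None` into the typed error, mirroring
        // `opts.node_id.context(MissingNodeIdSnafu)?` in the diff above.
        Mode::Distributed => Ok(Some(opts.node_id.context(MissingNodeIdSnafu)?)),
    }
}

fn main() {
    let standalone = Options { mode: Mode::Standalone, node_id: None };
    assert_eq!(resolve_node_id(&standalone).unwrap(), None);

    let ok = Options { mode: Mode::Distributed, node_id: Some(42) };
    assert_eq!(resolve_node_id(&ok).unwrap(), Some(42));

    let missing = Options { mode: Mode::Distributed, node_id: None };
    assert!(resolve_node_id(&missing).is_err());
}
```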
|
bd1a5dc265bca7d52c62694bb672edcf8be2c072
|
2024-01-05 15:16:39
|
JeremyHi
|
feat: metric engine support alter (#3098)
| false
|
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index e196ed70c6d6..b22f08ef9817 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -40,9 +40,7 @@ use table::requests::AlterKind;
use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_operate_region_error;
use crate::ddl::DdlContext;
-use crate::error::{
- self, ConvertAlterTableRequestSnafu, InvalidProtoMsgSnafu, Result, TableRouteNotFoundSnafu,
-};
+use crate::error::{self, ConvertAlterTableRequestSnafu, InvalidProtoMsgSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
@@ -65,6 +63,7 @@ impl AlterTableProcedure {
cluster_id: u64,
task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
+ physical_table_name: Option<TableName>,
context: DdlContext,
) -> Result<Self> {
let alter_kind = task
@@ -84,7 +83,13 @@ impl AlterTableProcedure {
Ok(Self {
context,
- data: AlterTableData::new(task, table_info_value, cluster_id, next_column_id),
+ data: AlterTableData::new(
+ task,
+ table_info_value,
+ physical_table_name,
+ cluster_id,
+ next_column_id,
+ ),
kind,
})
}
@@ -182,23 +187,19 @@ impl AlterTableProcedure {
pub async fn submit_alter_region_requests(&mut self) -> Result<Status> {
let table_id = self.data.table_id();
-
- let table_route = self
+ let (_, physical_table_route) = self
.context
.table_metadata_manager
.table_route_manager()
- .get(table_id)
- .await?
- .context(TableRouteNotFoundSnafu { table_id })?
- .into_inner();
- let region_routes = table_route.region_routes()?;
+ .get_physical_table_route(table_id)
+ .await?;
- let leaders = find_leaders(region_routes);
+ let leaders = find_leaders(&physical_table_route.region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
let requester = self.context.datanode_manager.datanode(&datanode).await;
- let regions = find_leader_regions(region_routes, &datanode);
+ let regions = find_leader_regions(&physical_table_route.region_routes, &datanode);
for region in regions {
let region_id = RegionId::new(table_id, region);
@@ -335,13 +336,24 @@ impl AlterTableProcedure {
}
fn lock_key_inner(&self) -> Vec<String> {
+ let mut lock_key = vec![];
+
+ if let Some(physical_table_name) = self.data.physical_table_name() {
+ let physical_table_key = common_catalog::format_full_table_name(
+ &physical_table_name.catalog_name,
+ &physical_table_name.schema_name,
+ &physical_table_name.table_name,
+ );
+ lock_key.push(physical_table_key);
+ }
+
let table_ref = self.data.table_ref();
let table_key = common_catalog::format_full_table_name(
table_ref.catalog,
table_ref.schema,
table_ref.table,
);
- let mut lock_key = vec![table_key];
+ lock_key.push(table_key);
if let Ok(Kind::RenameTable(RenameTable { new_table_name })) = self.alter_kind() {
lock_key.push(common_catalog::format_full_table_name(
@@ -415,6 +427,8 @@ pub struct AlterTableData {
task: AlterTableTask,
/// Table info value before alteration.
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
+ /// Physical table name, if the table to alter is a logical table.
+ physical_table_name: Option<TableName>,
cluster_id: u64,
/// Next column id of the table if the task adds columns to the table.
next_column_id: Option<ColumnId>,
@@ -424,6 +438,7 @@ impl AlterTableData {
pub fn new(
task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
+ physical_table_name: Option<TableName>,
cluster_id: u64,
next_column_id: Option<ColumnId>,
) -> Self {
@@ -431,6 +446,7 @@ impl AlterTableData {
state: AlterTableState::Prepare,
task,
table_info_value,
+ physical_table_name,
cluster_id,
next_column_id,
}
@@ -447,6 +463,10 @@ impl AlterTableData {
fn table_info(&self) -> &RawTableInfo {
&self.table_info_value.table_info
}
+
+ fn physical_table_name(&self) -> Option<&TableName> {
+ self.physical_table_name.as_ref()
+ }
}
/// Creates region proto alter kind from `table_info` and `alter_kind`.
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index fe5163b73098..28024b6602c4 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -46,6 +46,8 @@ use crate::rpc::ddl::{
TruncateTableTask,
};
use crate::rpc::router::RegionRoute;
+use crate::table_name::TableName;
+
pub type DdlManagerRef = Arc<DdlManager>;
/// The [DdlManager] provides the ability to execute Ddl.
@@ -160,11 +162,17 @@ impl DdlManager {
cluster_id: u64,
alter_table_task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
+ physical_table_name: Option<TableName>,
) -> Result<ProcedureId> {
let context = self.create_context();
- let procedure =
- AlterTableProcedure::new(cluster_id, alter_table_task, table_info_value, context)?;
+ let procedure = AlterTableProcedure::new(
+ cluster_id,
+ alter_table_task,
+ table_info_value,
+ physical_table_name,
+ context,
+ )?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -327,8 +335,38 @@ async fn handle_alter_table_task(
table_name: table_ref.to_string(),
})?;
+ let physical_table_id = ddl_manager
+ .table_metadata_manager()
+ .table_route_manager()
+ .get_physical_table_id(table_id)
+ .await?;
+
+ let physical_table_name = if physical_table_id == table_id {
+ None
+ } else {
+ let physical_table_info = &ddl_manager
+ .table_metadata_manager()
+ .table_info_manager()
+ .get(physical_table_id)
+ .await?
+ .with_context(|| error::TableInfoNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })?
+ .table_info;
+ Some(TableName {
+ catalog_name: physical_table_info.catalog_name.clone(),
+ schema_name: physical_table_info.schema_name.clone(),
+ table_name: physical_table_info.name.clone(),
+ })
+ };
+
let id = ddl_manager
- .submit_alter_table_task(cluster_id, alter_table_task, table_info_value)
+ .submit_alter_table_task(
+ cluster_id,
+ alter_table_task,
+ table_info_value,
+ physical_table_name,
+ )
.await?;
info!("Table: {table_id} is altered via procedure_id {id:?}");
diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs
index c95a28de60fe..9c9270f22d84 100644
--- a/src/common/meta/src/key/table_route.rs
+++ b/src/common/meta/src/key/table_route.rs
@@ -16,12 +16,14 @@ use std::collections::HashMap;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
-use snafu::{ensure, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
use super::{DeserializedValueWithBytes, TableMetaValue};
-use crate::error::{Result, SerdeJsonSnafu, UnexpectedLogicalRouteTableSnafu};
+use crate::error::{
+ Result, SerdeJsonSnafu, TableRouteNotFoundSnafu, UnexpectedLogicalRouteTableSnafu,
+};
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
@@ -334,6 +336,54 @@ impl TableRouteManager {
.transpose()
}
+ pub async fn get_physical_table_id(
+ &self,
+ logical_or_physical_table_id: TableId,
+ ) -> Result<TableId> {
+ let table_route = self
+ .get(logical_or_physical_table_id)
+ .await?
+ .context(TableRouteNotFoundSnafu {
+ table_id: logical_or_physical_table_id,
+ })?
+ .into_inner();
+
+ match table_route {
+ TableRouteValue::Physical(_) => Ok(logical_or_physical_table_id),
+ TableRouteValue::Logical(x) => Ok(x.physical_table_id()),
+ }
+ }
+
+ pub async fn get_physical_table_route(
+ &self,
+ logical_or_physical_table_id: TableId,
+ ) -> Result<(TableId, PhysicalTableRouteValue)> {
+ let table_route = self
+ .get(logical_or_physical_table_id)
+ .await?
+ .context(TableRouteNotFoundSnafu {
+ table_id: logical_or_physical_table_id,
+ })?
+ .into_inner();
+
+ match table_route {
+ TableRouteValue::Physical(x) => Ok((logical_or_physical_table_id, x)),
+ TableRouteValue::Logical(x) => {
+ let physical_table_id = x.physical_table_id();
+ let physical_table_route =
+ self.get(physical_table_id)
+ .await?
+ .context(TableRouteNotFoundSnafu {
+ table_id: physical_table_id,
+ })?;
+ Ok((
+ physical_table_id,
+ physical_table_route.physical_table_route().clone(),
+ ))
+ }
+ }
+ }
+
/// It may return a subset of the `table_ids`.
pub async fn batch_get(
&self,
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index 9ffad3aa6cf9..d042cdc37378 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -313,6 +313,7 @@ fn test_create_alter_region_request() {
1,
alter_table_task,
DeserializedValueWithBytes::from_inner(TableInfoValue::new(test_data::new_table_info())),
+ None,
test_data::new_ddl_context(Arc::new(DatanodeClients::default())),
)
.unwrap();
@@ -383,6 +384,7 @@ async fn test_submit_alter_region_requests() {
1,
alter_table_task,
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info)),
+ None,
context,
)
.unwrap();
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index 5b73ee8fedf7..4e424b595a0e 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -16,7 +16,7 @@ use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use api::v1::Rows;
-use common_meta::key::table_route::{TableRouteManager, TableRouteValue};
+use common_meta::key::table_route::TableRouteManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
use common_meta::rpc::router;
@@ -29,7 +29,7 @@ use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
use crate::columns::RangeColumnsPartitionRule;
-use crate::error::{FindLeaderSnafu, InvalidTableRouteDataSnafu, Result};
+use crate::error::{FindLeaderSnafu, Result};
use crate::partition::{PartitionBound, PartitionDef, PartitionExpr};
use crate::range::RangePartitionRule;
use crate::splitter::RowSplitter;
@@ -65,38 +65,13 @@ impl PartitionRuleManager {
}
}
- /// Find table route of given table name.
- async fn find_table_route(&self, table_id: TableId) -> Result<TableRouteValue> {
- let route = self
+ async fn find_region_routes(&self, table_id: TableId) -> Result<Vec<RegionRoute>> {
+ let (_, route) = self
.table_route_manager
- .get(table_id)
+ .get_physical_table_route(table_id)
.await
- .context(error::TableRouteManagerSnafu)?
- .context(error::FindTableRoutesSnafu { table_id })?
- .into_inner();
- Ok(route)
- }
-
- async fn find_region_routes(&self, table_id: TableId) -> Result<Vec<RegionRoute>> {
- let table_route = self.find_table_route(table_id).await?;
-
- let region_routes = match table_route {
- TableRouteValue::Physical(x) => x.region_routes,
-
- TableRouteValue::Logical(x) => {
- let TableRouteValue::Physical(physical_table_route) =
- self.find_table_route(x.physical_table_id()).await?
- else {
- return InvalidTableRouteDataSnafu {
- table_id: x.physical_table_id(),
- err_msg: "expected to be a physical table route",
- }
- .fail();
- };
- physical_table_route.region_routes
- }
- };
- Ok(region_routes)
+ .context(error::TableRouteManagerSnafu)?;
+ Ok(route.region_routes)
}
pub async fn find_table_partitions(&self, table_id: TableId) -> Result<Vec<PartitionInfo>> {
|
feat
|
metric engine support alter (#3098)
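A self-contained sketch of the resolution logic the route changes above rely on: a route is either physical or logical, and a logical route only stores its physical table id, so resolving bottoms out after at most one extra lookup. The enum and the in-memory map below are simplified stand-ins for the real `TableRouteValue` and `TableRouteManager`.

```rust
use std::collections::HashMap;

type TableId = u32;

#[derive(Debug)]
enum TableRouteValue {
    // A physical table owns its region routes (simplified to region numbers here).
    Physical(Vec<u32>),
    // A logical table only points at its physical table.
    Logical { physical_table_id: TableId },
}

/// Resolve any table id to the id of its physical table.
fn get_physical_table_id(
    routes: &HashMap<TableId, TableRouteValue>,
    table_id: TableId,
) -> Option<TableId> {
    match routes.get(&table_id)? {
        TableRouteValue::Physical(_) => Some(table_id),
        TableRouteValue::Logical { physical_table_id } => Some(*physical_table_id),
    }
}

fn main() {
    let mut routes = HashMap::new();
    routes.insert(1, TableRouteValue::Physical(vec![0, 1, 2]));
    routes.insert(2, TableRouteValue::Logical { physical_table_id: 1 });

    // A physical table resolves to itself; a logical table to its parent.
    assert_eq!(get_physical_table_id(&routes, 1), Some(1));
    assert_eq!(get_physical_table_id(&routes, 2), Some(1));
    assert_eq!(get_physical_table_id(&routes, 3), None);
}
```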
|
4811fe83f5e408d113a2da37bdec31f28536e486
|
2024-07-08 18:04:35
|
Zhenchi
|
fix: test_fulltext_intm_path (#4314)
| false
|
diff --git a/src/mito2/src/sst/index/intermediate.rs b/src/mito2/src/sst/index/intermediate.rs
index 6464b97eff93..02095eda348b 100644
--- a/src/mito2/src/sst/index/intermediate.rs
+++ b/src/mito2/src/sst/index/intermediate.rs
@@ -84,7 +84,6 @@ impl IntermediateManager {
#[derive(Debug, Clone)]
pub struct IntermediateLocation {
files_dir: String,
- sst_dir: String,
}
impl IntermediateLocation {
@@ -97,13 +96,12 @@ impl IntermediateLocation {
let uuid = Uuid::new_v4();
Self {
files_dir: format!("{INTERMEDIATE_DIR}/{region_id}/{sst_file_id}/{uuid}/"),
- sst_dir: format!("{INTERMEDIATE_DIR}/{region_id}/{sst_file_id}/"),
}
}
/// Returns the directory to clean up when the sorting is done
pub fn dir_to_cleanup(&self) -> &str {
- &self.sst_dir
+ &self.files_dir
}
/// Returns the path of the directory for intermediate files associated with a column:
@@ -121,6 +119,8 @@ impl IntermediateLocation {
#[cfg(test)]
mod tests {
+ use std::ffi::OsStr;
+
use common_test_util::temp_dir;
use regex::Regex;
@@ -152,11 +152,6 @@ mod tests {
let sst_file_id = FileId::random();
let location = IntermediateLocation::new(&RegionId::new(0, 0), &sst_file_id);
- assert_eq!(
- location.dir_to_cleanup(),
- format!("{INTERMEDIATE_DIR}/0/{sst_file_id}/")
- );
-
let re = Regex::new(&format!(
"{INTERMEDIATE_DIR}/0/{sst_file_id}/{}/",
r"\w{8}-\w{4}-\w{4}-\w{4}-\w{12}"
@@ -187,21 +182,19 @@ mod tests {
let manager = IntermediateManager::init_fs(&aux_path).await.unwrap();
let region_id = RegionId::new(0, 0);
let sst_file_id = FileId::random();
- let column_id = 0;
+ let column_id = 1;
let fulltext_path = manager.fulltext_path(&region_id, &sst_file_id, column_id);
- if cfg!(windows) {
- let p = fulltext_path.to_string_lossy().to_string();
- let r = Regex::new(&format!(
- "{aux_path}\\\\{INTERMEDIATE_DIR}\\\\0\\\\{sst_file_id}\\\\fulltext-0-\\w{{8}}-\\w{{4}}-\\w{{4}}-\\w{{4}}-\\w{{12}}",
- )).unwrap();
- assert!(r.is_match(&p));
- } else {
- let p = fulltext_path.to_string_lossy().to_string();
- let r = Regex::new(&format!(
- "{aux_path}/{INTERMEDIATE_DIR}/0/{sst_file_id}/fulltext-0-\\w{{8}}-\\w{{4}}-\\w{{4}}-\\w{{4}}-\\w{{12}}",
- )).unwrap();
- assert!(r.is_match(&p));
+ let mut pi = fulltext_path.iter();
+ for a in temp_dir.path().iter() {
+ assert_eq!(a, pi.next().unwrap());
}
+ assert_eq!(pi.next().unwrap(), INTERMEDIATE_DIR);
+ assert_eq!(pi.next().unwrap(), "0"); // region id
+ assert_eq!(pi.next().unwrap(), OsStr::new(&sst_file_id.to_string())); // sst file id
+ assert!(Regex::new(r"fulltext-1-\w{8}-\w{4}-\w{4}-\w{4}-\w{12}")
+ .unwrap()
+ .is_match(&pi.next().unwrap().to_string_lossy())); // fulltext path
+ assert!(pi.next().is_none());
}
}
|
fix
|
test_fulltext_intm_path (#4314)
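A hedged, standalone illustration of the portability trick used in the rewritten test above: asserting on path components instead of the rendered string avoids hard-coding '/' versus '\\' per platform. All names in the sketch are made up.

```rust
use std::ffi::OsStr;
use std::path::{Path, PathBuf};

fn main() {
    // Build a path the same way on every platform.
    let path: PathBuf = Path::new("aux")
        .join("__intm")
        .join("0")
        .join("some-sst-file-id")
        .join("fulltext-1-uuid");

    // Walking components sidesteps separator differences in assertions.
    let mut it = path.iter();
    assert_eq!(it.next(), Some(OsStr::new("aux")));
    assert_eq!(it.next(), Some(OsStr::new("__intm")));
    assert_eq!(it.next(), Some(OsStr::new("0"))); // region id
    assert_eq!(it.next(), Some(OsStr::new("some-sst-file-id")));
    assert_eq!(it.next(), Some(OsStr::new("fulltext-1-uuid")));
    assert!(it.next().is_none());
    println!("checked {} components", path.iter().count());
}
```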
|
ea9af42091f056b01c03b445c91f7883954650bc
|
2022-12-21 17:02:30
|
LFC
|
chore: upgrade Rust to nightly 2022-12-20 (#772)
| false
|
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index af5d54e1b4cc..81f6005e887a 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -24,7 +24,7 @@ on:
name: Code coverage
env:
- RUST_TOOLCHAIN: nightly-2022-07-14
+ RUST_TOOLCHAIN: nightly-2022-12-20
jobs:
coverage:
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 2cba1fa5d238..b877df919dcd 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -23,7 +23,7 @@ on:
name: CI
env:
- RUST_TOOLCHAIN: nightly-2022-07-14
+ RUST_TOOLCHAIN: nightly-2022-12-20
jobs:
typos:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 5a98be220162..8a1433db6fd1 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -10,7 +10,7 @@ on:
name: Release
env:
- RUST_TOOLCHAIN: nightly-2022-07-14
+ RUST_TOOLCHAIN: nightly-2022-12-20
# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index 0d097fb3a05b..885f1827e150 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -15,7 +15,6 @@
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
-#![feature(once_cell)]
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
@@ -94,7 +93,7 @@ async fn write_data(
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
- progress_bar.set_message(format!("{:?}", path));
+ progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
@@ -115,10 +114,7 @@ async fn write_data(
progress_bar.inc(row_count as _);
}
- progress_bar.finish_with_message(format!(
- "file {:?} done in {}ms",
- path, total_rpc_elapsed_ms
- ));
+ progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
@@ -355,12 +351,12 @@ fn query_set() -> HashMap<String, String> {
ret.insert(
"count_all".to_string(),
- format!("SELECT COUNT(*) FROM {};", TABLE_NAME),
+ format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
);
ret.insert(
"fare_amt_by_passenger".to_string(),
- format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {} GROUP BY passenger_count",TABLE_NAME)
+ format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
);
ret
@@ -373,7 +369,7 @@ async fn do_write(args: &Args, client: &Client) {
let mut write_jobs = JoinSet::new();
let create_table_result = admin.create(create_table_expr()).await;
- println!("Create table result: {:?}", create_table_result);
+ println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
@@ -406,7 +402,7 @@ async fn do_write(args: &Args, client: &Client) {
async fn do_query(num_iter: usize, db: &Database) {
for (query_name, query) in query_set() {
- println!("Running query: {}", query);
+ println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let _res = db.select(Select::Sql(query.clone())).await.unwrap();
diff --git a/rust-toolchain b/rust-toolchain
index 746dd10072bf..14b94c3fefe7 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2022-07-14
+nightly-2022-12-20
diff --git a/src/catalog/src/helper.rs b/src/catalog/src/helper.rs
index 062d07bc1997..0993e865f69f 100644
--- a/src/catalog/src/helper.rs
+++ b/src/catalog/src/helper.rs
@@ -33,48 +33,38 @@ const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
- "^{}-({})$",
- CATALOG_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN
+ "^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
- "^{}-({})-({})$",
- SCHEMA_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN, ALPHANUMERICS_NAME_PATTERN
+ "^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
- "^{}-({})-({})-({})$",
- TABLE_GLOBAL_KEY_PREFIX,
- ALPHANUMERICS_NAME_PATTERN,
- ALPHANUMERICS_NAME_PATTERN,
- ALPHANUMERICS_NAME_PATTERN
+ "^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
- "^{}-({})-({})-({})-([0-9]+)$",
- TABLE_REGIONAL_KEY_PREFIX,
- ALPHANUMERICS_NAME_PATTERN,
- ALPHANUMERICS_NAME_PATTERN,
- ALPHANUMERICS_NAME_PATTERN
+ "^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
))
.unwrap();
}
pub fn build_catalog_prefix() -> String {
- format!("{}-", CATALOG_KEY_PREFIX)
+ format!("{CATALOG_KEY_PREFIX}-")
}
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
- format!("{}-{}-", SCHEMA_KEY_PREFIX, catalog_name.as_ref())
+ format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}
pub fn build_table_global_prefix(
@@ -82,8 +72,7 @@ pub fn build_table_global_prefix(
schema_name: impl AsRef<str>,
) -> String {
format!(
- "{}-{}-{}-",
- TABLE_GLOBAL_KEY_PREFIX,
+ "{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
catalog_name.as_ref(),
schema_name.as_ref()
)
@@ -378,7 +367,7 @@ mod tests {
table_info,
};
let serialized = serde_json::to_string(&value).unwrap();
- let deserialized = TableGlobalValue::parse(&serialized).unwrap();
+ let deserialized = TableGlobalValue::parse(serialized).unwrap();
assert_eq!(value, deserialized);
}
}
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index d71a0c6d5b72..21d8efe65f1b 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -157,7 +157,7 @@ pub struct RegisterSchemaRequest {
/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
- format!("{}.{}.{}", catalog, schema, table)
+ format!("{catalog}.{schema}.{table}")
}
pub trait CatalogProviderFactory {
@@ -187,8 +187,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
.await
.with_context(|_| CreateTableSnafu {
table_info: format!(
- "{}.{}.{}, id: {}",
- catalog_name, schema_name, table_name, table_id,
+ "{catalog_name}.{schema_name}.{table_name}, id: {table_id}",
),
})?;
manager
@@ -200,7 +199,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
table: table.clone(),
})
.await?;
- info!("Created and registered system table: {}", table_name);
+ info!("Created and registered system table: {table_name}");
table
};
if let Some(hook) = req.open_hook {
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index e4c89933e04d..1a1f1a995a29 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -338,7 +338,7 @@ impl CatalogManager for LocalCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
- schema_info: format!("{}.{}", catalog_name, schema_name),
+ schema_info: format!("{catalog_name}.{schema_name}"),
})?;
{
@@ -452,7 +452,7 @@ impl CatalogManager for LocalCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
- schema_info: format!("{}.{}", catalog_name, schema_name),
+ schema_info: format!("{catalog_name}.{schema_name}"),
})?;
schema.table(table_name)
}
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index c37acdc303be..c18a079c84bd 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -331,10 +331,7 @@ impl RemoteCatalogManager {
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
- table_info: format!(
- "{}.{}.{}, id:{}",
- catalog_name, schema_name, table_name, table_id
- ),
+ table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
})? {
Some(table) => {
info!(
@@ -355,7 +352,7 @@ impl RemoteCatalogManager {
.clone()
.try_into()
.context(InvalidTableSchemaSnafu {
- table_info: format!("{}.{}.{}", catalog_name, schema_name, table_name,),
+ table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
schema: meta.schema.clone(),
})?;
let req = CreateTableRequest {
@@ -477,7 +474,7 @@ impl CatalogManager for RemoteCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
- schema_info: format!("{}.{}", catalog_name, schema_name),
+ schema_info: format!("{catalog_name}.{schema_name}"),
})?;
schema.table(table_name)
}
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index a845c081438b..df39d3a5ab13 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -197,7 +197,7 @@ pub fn build_table_insert_request(full_table_name: String, table_id: TableId) ->
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
- let full_schema_name = format!("{}.{}", catalog_name, schema_name);
+ let full_schema_name = format!("{catalog_name}.{schema_name}");
build_insert_request(
EntryType::Schema,
full_schema_name.as_bytes(),
@@ -390,7 +390,7 @@ mod tests {
if let Entry::Catalog(e) = entry {
assert_eq!("some_catalog", e.catalog_name);
} else {
- panic!("Unexpected type: {:?}", entry);
+ panic!("Unexpected type: {entry:?}");
}
}
@@ -407,7 +407,7 @@ mod tests {
assert_eq!("some_catalog", e.catalog_name);
assert_eq!("some_schema", e.schema_name);
} else {
- panic!("Unexpected type: {:?}", entry);
+ panic!("Unexpected type: {entry:?}");
}
}
@@ -426,7 +426,7 @@ mod tests {
assert_eq!("some_table", e.table_name);
assert_eq!(42, e.table_id);
} else {
- panic!("Unexpected type: {:?}", entry);
+ panic!("Unexpected type: {entry:?}");
}
}
diff --git a/src/catalog/tests/local_catalog_tests.rs b/src/catalog/tests/local_catalog_tests.rs
index 2e577540778a..e58722bac4c5 100644
--- a/src/catalog/tests/local_catalog_tests.rs
+++ b/src/catalog/tests/local_catalog_tests.rs
@@ -69,8 +69,7 @@ mod tests {
assert!(
err.to_string()
.contains("Table `greptime.public.test_table` already exists"),
- "Actual error message: {}",
- err
+ "Actual error message: {err}",
);
}
diff --git a/src/catalog/tests/mock.rs b/src/catalog/tests/mock.rs
index 01aec6e2f883..336f41ba0a2a 100644
--- a/src/catalog/tests/mock.rs
+++ b/src/catalog/tests/mock.rs
@@ -189,10 +189,10 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
- fn get_table<'a>(
+ fn get_table(
&self,
_ctx: &EngineContext,
- table_ref: &'a TableReference,
+ table_ref: &TableReference,
) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async {
Ok(self
@@ -204,7 +204,7 @@ impl TableEngine for MockTableEngine {
})
}
- fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
+ fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
futures::executor::block_on(async {
self.tables
.read()
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index d4b65c3a85c7..e14f6f6e0a14 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -221,8 +221,7 @@ impl TryFrom<StartCommand> for FrontendOptions {
if addr == datanode_grpc_addr {
return IllegalConfigSnafu {
msg: format!(
- "gRPC listen address conflicts with datanode reserved gRPC addr: {}",
- datanode_grpc_addr
+ "gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
),
}
.fail();
diff --git a/src/common/error/src/ext.rs b/src/common/error/src/ext.rs
index 54b4343120f7..9d2b3fa0ae64 100644
--- a/src/common/error/src/ext.rs
+++ b/src/common/error/src/ext.rs
@@ -131,7 +131,7 @@ mod tests {
assert!(ErrorCompat::backtrace(&err).is_some());
- let msg = format!("{:?}", err);
+ let msg = format!("{err:?}");
assert!(msg.contains("\nBacktrace:\n"));
let fmt_msg = format!("{:?}", DebugFormat::new(&err));
assert_eq!(msg, fmt_msg);
@@ -151,7 +151,7 @@ mod tests {
assert!(err.as_any().downcast_ref::<MockError>().is_some());
assert!(err.source().is_some());
- let msg = format!("{:?}", err);
+ let msg = format!("{err:?}");
assert!(msg.contains("\nBacktrace:\n"));
assert!(msg.contains("Caused by"));
diff --git a/src/common/error/src/format.rs b/src/common/error/src/format.rs
index 87d8171ce0c5..40c077ca3d5c 100644
--- a/src/common/error/src/format.rs
+++ b/src/common/error/src/format.rs
@@ -31,11 +31,11 @@ impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
write!(f, "{}.", self.0)?;
if let Some(source) = self.0.source() {
// Source error use debug format for more verbose info.
- write!(f, " Caused by: {:?}", source)?;
+ write!(f, " Caused by: {source:?}")?;
}
if let Some(backtrace) = self.0.backtrace_opt() {
// Add a newline to separate causes and backtrace.
- write!(f, "\nBacktrace:\n{}", backtrace)?;
+ write!(f, "\nBacktrace:\n{backtrace}")?;
}
Ok(())
diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs
index 8f8a576e8d44..3e5fec37aca3 100644
--- a/src/common/error/src/status_code.rs
+++ b/src/common/error/src/status_code.rs
@@ -87,7 +87,7 @@ impl StatusCode {
impl fmt::Display for StatusCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// The current debug format is suitable to display.
- write!(f, "{:?}", self)
+ write!(f, "{self:?}")
}
}
@@ -96,7 +96,7 @@ mod tests {
use super::*;
fn assert_status_code_display(code: StatusCode, msg: &str) {
- let code_msg = format!("{}", code);
+ let code_msg = format!("{code}");
assert_eq!(msg, code_msg);
}
diff --git a/src/common/function/src/scalars/numpy/interp.rs b/src/common/function/src/scalars/numpy/interp.rs
index c4bb6e981103..c4dbadc99914 100644
--- a/src/common/function/src/scalars/numpy/interp.rs
+++ b/src/common/function/src/scalars/numpy/interp.rs
@@ -343,7 +343,7 @@ mod tests {
Arc::new(Int64Vector::from_vec(fp.clone())),
];
let vector = interp(&args).unwrap();
- assert!(matches!(vector.get(0), Value::Float64(v) if v==x[0] as f64));
+ assert!(matches!(vector.get(0), Value::Float64(v) if v == x[0]));
// x=None output:Null
let input = vec![None, Some(0.0), Some(0.3)];
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index f6a7dcee874d..38812f695e70 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -127,12 +127,7 @@ mod tests {
assert_eq!(4, vec.len());
for i in 0..4 {
- assert_eq!(
- i == 0 || i == 3,
- vec.get_data(i).unwrap(),
- "failed at {}",
- i
- )
+ assert_eq!(i == 0 || i == 3, vec.get_data(i).unwrap(), "Failed at {i}",)
}
}
_ => unreachable!(),
diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs
index 8f43932c35a0..45c9e710d1d7 100644
--- a/src/common/grpc-expr/src/alter.rs
+++ b/src/common/grpc-expr/src/alter.rs
@@ -106,7 +106,7 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
.iter()
.any(|column| column.name == expr.time_index),
MissingTimestampColumnSnafu {
- msg: format!("CreateExpr: {:?}", expr)
+ msg: format!("CreateExpr: {expr:?}")
}
);
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index a8273c62b686..169574b4446c 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -154,7 +154,7 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
collect_values!(values.i32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Int64 => {
- collect_values!(values.i64_values, |v| ValueRef::from(*v as i64))
+ collect_values!(values.i64_values, |v| ValueRef::from(*v))
}
ColumnDataType::Uint8 => {
collect_values!(values.u8_values, |v| ValueRef::from(*v as u8))
@@ -166,7 +166,7 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
collect_values!(values.u32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Uint64 => {
- collect_values!(values.u64_values, |v| ValueRef::from(*v as u64))
+ collect_values!(values.u64_values, |v| ValueRef::from(*v))
}
ColumnDataType::Float32 => collect_values!(values.f32_values, |v| ValueRef::from(*v)),
ColumnDataType::Float64 => collect_values!(values.f64_values, |v| ValueRef::from(*v)),
diff --git a/src/common/grpc-expr/src/lib.rs b/src/common/grpc-expr/src/lib.rs
index 71786d670f92..f296966bb315 100644
--- a/src/common/grpc-expr/src/lib.rs
+++ b/src/common/grpc-expr/src/lib.rs
@@ -1,4 +1,3 @@
-#![feature(assert_matches)]
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/src/common/grpc/benches/channel_manager.rs b/src/common/grpc/benches/channel_manager.rs
index 0b95dcddd032..1b2a917b5693 100644
--- a/src/common/grpc/benches/channel_manager.rs
+++ b/src/common/grpc/benches/channel_manager.rs
@@ -26,7 +26,7 @@ async fn do_bench_channel_manager() {
let join = tokio::spawn(async move {
for _ in 0..10000 {
let idx = rand::random::<usize>() % 100;
- let ret = m_clone.get(format!("{}", idx));
+ let ret = m_clone.get(format!("{idx}"));
assert!(ret.is_ok());
}
});
diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs
index 7bb87872f862..42f5dea47df4 100644
--- a/src/common/grpc/src/channel_manager.rs
+++ b/src/common/grpc/src/channel_manager.rs
@@ -120,7 +120,7 @@ impl ChannelManager {
fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
let mut endpoint =
- Endpoint::new(format!("http://{}", addr)).context(error::CreateChannelSnafu)?;
+ Endpoint::new(format!("http://{addr}")).context(error::CreateChannelSnafu)?;
if let Some(dur) = self.config.timeout {
endpoint = endpoint.timeout(dur);
diff --git a/src/common/query/src/function.rs b/src/common/query/src/function.rs
index 01bfffce7fa0..cde2e25f1490 100644
--- a/src/common/query/src/function.rs
+++ b/src/common/query/src/function.rs
@@ -161,12 +161,7 @@ mod tests {
assert_eq!(4, vec.len());
for i in 0..4 {
- assert_eq!(
- i == 0 || i == 3,
- vec.get_data(i).unwrap(),
- "failed at {}",
- i
- )
+ assert_eq!(i == 0 || i == 3, vec.get_data(i).unwrap(), "Failed at {i}")
}
}
_ => unreachable!(),
diff --git a/src/common/query/src/logical_plan/accumulator.rs b/src/common/query/src/logical_plan/accumulator.rs
index 4b83a7efa8cd..7ef476d2b964 100644
--- a/src/common/query/src/logical_plan/accumulator.rs
+++ b/src/common/query/src/logical_plan/accumulator.rs
@@ -131,7 +131,7 @@ impl DfAccumulator for DfAccumulatorAdaptor {
let state_types = self.creator.state_types()?;
if state_values.len() != state_types.len() {
return error::BadAccumulatorImplSnafu {
- err_msg: format!("Accumulator {:?} returned state values size do not match its state types size.", self),
+ err_msg: format!("Accumulator {self:?} returned state values size do not match its state types size."),
}
.fail()?;
}
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index be96a94a50d8..4fcb69656d70 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -231,8 +231,7 @@ mod tests {
assert_eq!(
result.unwrap_err().to_string(),
format!(
- "Failed to create RecordBatches, reason: expect RecordBatch schema equals {:?}, actual: {:?}",
- schema1, schema2
+ "Failed to create RecordBatches, reason: expect RecordBatch schema equals {schema1:?}, actual: {schema2:?}",
)
);
diff --git a/src/common/substrait/src/df_expr.rs b/src/common/substrait/src/df_expr.rs
index b8d77a113c7c..88a35261d8bf 100644
--- a/src/common/substrait/src/df_expr.rs
+++ b/src/common/substrait/src/df_expr.rs
@@ -61,7 +61,7 @@ pub(crate) fn to_df_expr(
| RexType::Cast(_)
| RexType::Subquery(_)
| RexType::Enum(_) => UnsupportedExprSnafu {
- name: format!("substrait expression {:?}", expr_rex_type),
+ name: format!("substrait expression {expr_rex_type:?}"),
}
.fail()?,
}
@@ -109,7 +109,7 @@ pub fn convert_scalar_function(
let fn_name = ctx
.find_scalar_fn(anchor)
.with_context(|| InvalidParametersSnafu {
- reason: format!("Unregistered scalar function reference: {}", anchor),
+ reason: format!("Unregistered scalar function reference: {anchor}"),
})?;
// convenient util
@@ -435,7 +435,7 @@ pub fn convert_scalar_function(
// skip Wildcard, unimplemented.
// end other direct expr
_ => UnsupportedExprSnafu {
- name: format!("scalar function {}", fn_name),
+ name: format!("scalar function {fn_name}"),
}
.fail()?,
};
@@ -595,8 +595,8 @@ pub fn convert_column(column: &Column, schema: &Schema) -> Result<FieldReference
schema
.column_index_by_name(column_name)
.with_context(|| MissingFieldSnafu {
- field: format!("{:?}", column),
- plan: format!("schema: {:?}", schema),
+ field: format!("{column:?}"),
+ plan: format!("schema: {schema:?}"),
})?;
Ok(FieldReference {
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index c1323abc66c2..e51b25093ef3 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -236,7 +236,7 @@ impl DFLogicalSubstraitConvertor {
.map_err(BoxedError::new)
.context(InternalSnafu)?
.context(TableNotFoundSnafu {
- name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
+ name: format!("{catalog_name}.{schema_name}.{table_name}"),
})?;
let adapter = Arc::new(DefaultTableSource::new(Arc::new(
DfTableProviderAdapter::new(table_ref),
@@ -281,7 +281,7 @@ impl DFLogicalSubstraitConvertor {
// TODO(ruihang): Support limit(fetch)
Ok(LogicalPlan::TableScan(TableScan {
- table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
+ table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
source: adapter,
projection,
projected_schema,
@@ -397,8 +397,7 @@ impl DFLogicalSubstraitConvertor {
| LogicalPlan::Analyze(_)
| LogicalPlan::Extension(_) => InvalidParametersSnafu {
reason: format!(
- "Trying to convert DDL/DML plan to substrait proto, plan: {:?}",
- plan
+ "Trying to convert DDL/DML plan to substrait proto, plan: {plan:?}",
),
}
.fail()?,
@@ -572,7 +571,7 @@ mod test {
let proto = convertor.encode(plan.clone()).unwrap();
let tripped_plan = convertor.decode(proto, catalog).unwrap();
- assert_eq!(format!("{:?}", plan), format!("{:?}", tripped_plan));
+ assert_eq!(format!("{plan:?}"), format!("{tripped_plan:?}"));
}
#[tokio::test]
@@ -606,8 +605,7 @@ mod test {
let table_scan_plan = LogicalPlan::TableScan(TableScan {
table_name: format!(
- "{}.{}.{}",
- DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME
+ "{DEFAULT_CATALOG_NAME}.{DEFAULT_SCHEMA_NAME}.{DEFAULT_TABLE_NAME}",
),
source: adapter,
projection: Some(projection),
diff --git a/src/common/substrait/src/types.rs b/src/common/substrait/src/types.rs
index d1033c7a3e6b..31de2a16a19a 100644
--- a/src/common/substrait/src/types.rs
+++ b/src/common/substrait/src/types.rs
@@ -87,7 +87,7 @@ pub fn to_concrete_type(ty: &SType) -> Result<(ConcreteDataType, bool)> {
| Kind::List(_)
| Kind::Map(_)
| Kind::UserDefinedTypeReference(_) => UnsupportedSubstraitTypeSnafu {
- ty: format!("{:?}", kind),
+ ty: format!("{kind:?}"),
}
.fail(),
}
@@ -154,7 +154,7 @@ pub(crate) fn scalar_value_as_literal_type(v: &ScalarValue) -> Result<LiteralTyp
// TODO(LFC): Implement other conversions: ScalarValue => LiteralType
_ => {
return error::UnsupportedExprSnafu {
- name: format!("{:?}", v),
+ name: format!("{v:?}"),
}
.fail()
}
@@ -177,7 +177,7 @@ pub(crate) fn literal_type_to_scalar_value(t: LiteralType) -> Result<ScalarValue
// TODO(LFC): Implement other conversions: Kind => ScalarValue
_ => {
return error::UnsupportedSubstraitTypeSnafu {
- ty: format!("{:?}", kind),
+ ty: format!("{kind:?}"),
}
.fail()
}
@@ -194,7 +194,7 @@ pub(crate) fn literal_type_to_scalar_value(t: LiteralType) -> Result<ScalarValue
// TODO(LFC): Implement other conversions: LiteralType => ScalarValue
_ => {
return error::UnsupportedSubstraitTypeSnafu {
- ty: format!("{:?}", t),
+ ty: format!("{t:?}"),
}
.fail()
}
diff --git a/src/common/telemetry/src/panic_hook.rs b/src/common/telemetry/src/panic_hook.rs
index e29a832637da..ef2c2e639b12 100644
--- a/src/common/telemetry/src/panic_hook.rs
+++ b/src/common/telemetry/src/panic_hook.rs
@@ -28,7 +28,7 @@ pub fn set_panic_hook() {
let default_hook = panic::take_hook();
panic::set_hook(Box::new(move |panic| {
let backtrace = Backtrace::new();
- let backtrace = format!("{:?}", backtrace);
+ let backtrace = format!("{backtrace:?}");
if let Some(location) = panic.location() {
tracing::error!(
message = %panic,
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 1597ac73886c..6115bfa60aed 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -155,7 +155,7 @@ pub enum Error {
#[snafu(display("Failed to init backend, config: {:#?}, source: {}", config, source))]
InitBackend {
- config: ObjectStoreConfig,
+ config: Box<ObjectStoreConfig>,
source: object_store::Error,
backtrace: Backtrace,
},
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 9d09e4724c43..697d9313013b 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -233,7 +233,7 @@ pub(crate) async fn new_fs_object_store(data_dir: &str) -> Result<ObjectStore> {
.context(error::CreateDirSnafu { dir: &data_dir })?;
info!("The file storage directory is: {}", &data_dir);
- let atomic_write_dir = format!("{}/.tmp/", data_dir);
+ let atomic_write_dir = format!("{data_dir}/.tmp/");
let accessor = FsBuilder::default()
.root(&data_dir)
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 0817abcb966d..39c53e9c26f0 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -117,7 +117,7 @@ impl Instance {
}
Some(select_expr::Expr::LogicalPlan(plan)) => self.execute_logical(plan).await,
_ => UnsupportedExprSnafu {
- name: format!("{:?}", expr),
+ name: format!("{expr:?}"),
}
.fail(),
}
@@ -175,7 +175,7 @@ impl GrpcQueryHandler for Instance {
Some(object_expr::Expr::Select(select_expr)) => self.handle_select(select_expr).await,
other => {
return servers::error::NotSupportedSnafu {
- feat: format!("{:?}", other),
+ feat: format!("{other:?}"),
}
.fail();
}
@@ -200,7 +200,7 @@ impl GrpcAdminHandler for Instance {
}
other => {
return servers::error::NotSupportedSnafu {
- feat: format!("{:?}", other),
+ feat: format!("{other:?}"),
}
.fail();
}
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index 41269882ec2e..f238a0374557 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -187,8 +187,7 @@ fn table_idents_to_full_name(
)),
_ => error::InvalidSqlSnafu {
msg: format!(
- "expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {}",
- obj_name
+ "expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {obj_name}",
),
}.fail(),
}
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 9be540d6922d..cbb247595550 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -40,7 +40,7 @@ impl Services {
pub async fn try_new(instance: InstanceRef, opts: &DatanodeOptions) -> Result<Self> {
let grpc_runtime = Arc::new(
RuntimeBuilder::default()
- .worker_threads(opts.rpc_runtime_size as usize)
+ .worker_threads(opts.rpc_runtime_size)
.thread_name("grpc-io-handlers")
.build()
.context(RuntimeResourceSnafu)?,
@@ -54,7 +54,7 @@ impl Services {
Mode::Distributed => {
let mysql_io_runtime = Arc::new(
RuntimeBuilder::default()
- .worker_threads(opts.mysql_runtime_size as usize)
+ .worker_threads(opts.mysql_runtime_size)
.thread_name("mysql-io-handlers")
.build()
.context(RuntimeResourceSnafu)?,
diff --git a/src/datanode/src/server/grpc.rs b/src/datanode/src/server/grpc.rs
index 3fa54f3b3970..8327b9e1d934 100644
--- a/src/datanode/src/server/grpc.rs
+++ b/src/datanode/src/server/grpc.rs
@@ -202,8 +202,7 @@ mod tests {
let err_msg = result.unwrap_err().to_string();
assert!(
err_msg.contains("Missing timestamp column"),
- "actual: {}",
- err_msg
+ "actual: {err_msg}",
);
}
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index b336ef81776a..aa41e8a25536 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -96,7 +96,7 @@ impl SqlHandler {
result
}
- pub(crate) fn get_table<'a>(&self, table_ref: &'a TableReference) -> Result<TableRef> {
+ pub(crate) fn get_table(&self, table_ref: &TableReference) -> Result<TableRef> {
self.table_engine
.get_table(&EngineContext::default(), table_ref)
.with_context(|_| GetTableSnafu {
diff --git a/src/datanode/src/sql/alter.rs b/src/datanode/src/sql/alter.rs
index 77fada09fd63..1fa48a3b1812 100644
--- a/src/datanode/src/sql/alter.rs
+++ b/src/datanode/src/sql/alter.rs
@@ -61,7 +61,7 @@ impl SqlHandler {
let alter_kind = match alter_table.alter_operation() {
AlterTableOperation::AddConstraint(table_constraint) => {
return error::InvalidSqlSnafu {
- msg: format!("unsupported table constraint {}", table_constraint),
+ msg: format!("unsupported table constraint {table_constraint}"),
}
.fail()
}
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index ac80338aa860..7e83907e1b9b 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -143,7 +143,7 @@ impl SqlHandler {
)?;
} else {
return error::InvalidSqlSnafu {
- msg: format!("Cannot recognize named UNIQUE constraint: {}", name),
+ msg: format!("Cannot recognize named UNIQUE constraint: {name}"),
}
.fail();
}
@@ -158,8 +158,7 @@ impl SqlHandler {
} else {
return error::InvalidSqlSnafu {
msg: format!(
- "Unrecognized non-primary unnamed UNIQUE constraint: {:?}",
- name
+ "Unrecognized non-primary unnamed UNIQUE constraint: {name:?}",
),
}
.fail();
@@ -167,7 +166,7 @@ impl SqlHandler {
}
_ => {
return ConstraintNotSupportedSnafu {
- constraint: format!("{:?}", c),
+ constraint: format!("{c:?}"),
}
.fail();
}
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index 26ba03da73ec..09771f5eda99 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -487,12 +487,11 @@ async fn test_insert_with_default_value_for_type(type_name: &str) {
let create_sql = format!(
r#"create table test_table(
host string,
- ts {} DEFAULT CURRENT_TIMESTAMP,
+ ts {type_name} DEFAULT CURRENT_TIMESTAMP,
cpu double default 0,
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);"#,
- type_name
);
let output = execute_sql(&instance, &create_sql).await;
assert!(matches!(output, Output::AffectedRows(1)));
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index a7cf8e1fe552..9fba0ff5f13e 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -40,8 +40,8 @@ pub struct TestGuard {
}
pub fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard) {
- let wal_tmp_dir = TempDir::new(&format!("gt_wal_{}", name)).unwrap();
- let data_tmp_dir = TempDir::new(&format!("gt_data_{}", name)).unwrap();
+ let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
+ let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
let opts = DatanodeOptions {
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
storage: ObjectStoreConfig::File {
diff --git a/src/datatypes/src/error.rs b/src/datatypes/src/error.rs
index 2cb8553a900d..ddb390a8a58a 100644
--- a/src/datatypes/src/error.rs
+++ b/src/datatypes/src/error.rs
@@ -139,7 +139,7 @@ mod tests {
map.insert(false, 2);
let result = serde_json::to_string(&map).context(SerializeSnafu);
- assert!(result.is_err(), "serialize result is: {:?}", result);
+ assert!(result.is_err(), "serialize result is: {result:?}");
let err = serde_json::to_string(&map)
.context(SerializeSnafu)
.err()
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs
index 3051c7a4b3e3..ff15b33c8965 100644
--- a/src/datatypes/src/lib.rs
+++ b/src/datatypes/src/lib.rs
@@ -12,9 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![feature(generic_associated_types)]
-#![feature(assert_matches)]
-
pub mod arrow_array;
pub mod data_type;
pub mod error;
diff --git a/src/datatypes/src/schema/constraint.rs b/src/datatypes/src/schema/constraint.rs
index 4dd3ecc14b7f..f512f3190d63 100644
--- a/src/datatypes/src/schema/constraint.rs
+++ b/src/datatypes/src/schema/constraint.rs
@@ -57,8 +57,8 @@ impl TryFrom<ColumnDefaultConstraint> for Vec<u8> {
impl Display for ColumnDefaultConstraint {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
- ColumnDefaultConstraint::Function(expr) => write!(f, "{}", expr),
- ColumnDefaultConstraint::Value(v) => write!(f, "{}", v),
+ ColumnDefaultConstraint::Function(expr) => write!(f, "{expr}"),
+ ColumnDefaultConstraint::Value(v) => write!(f, "{v}"),
}
}
}
@@ -172,10 +172,7 @@ fn create_current_timestamp_vector(
std::iter::repeat(util::current_time_millis()).take(num_rows),
))),
_ => error::DefaultValueTypeSnafu {
- reason: format!(
- "Not support to assign current timestamp to {:?} type",
- data_type
- ),
+ reason: format!("Not support to assign current timestamp to {data_type:?} type",),
}
.fail(),
}
@@ -301,6 +298,6 @@ mod tests {
let err = constraint
.create_default_vector(&data_type, false, 4)
.unwrap_err();
- assert!(matches!(err, Error::DefaultValueType { .. }), "{:?}", err);
+ assert!(matches!(err, Error::DefaultValueType { .. }), "{err:?}");
}
}
diff --git a/src/datatypes/src/types/date_type.rs b/src/datatypes/src/types/date_type.rs
index afd482359d71..6b7bb788a3dd 100644
--- a/src/datatypes/src/types/date_type.rs
+++ b/src/datatypes/src/types/date_type.rs
@@ -83,7 +83,7 @@ impl LogicalPrimitiveType for DateType {
ValueRef::Null => Ok(None),
ValueRef::Date(v) => Ok(Some(v)),
other => error::CastTypeSnafu {
- msg: format!("Failed to cast value {:?} to Date", other,),
+ msg: format!("Failed to cast value {other:?} to Date"),
}
.fail(),
}
diff --git a/src/datatypes/src/types/datetime_type.rs b/src/datatypes/src/types/datetime_type.rs
index ccd810eee746..47e1183a3a4e 100644
--- a/src/datatypes/src/types/datetime_type.rs
+++ b/src/datatypes/src/types/datetime_type.rs
@@ -84,7 +84,7 @@ impl LogicalPrimitiveType for DateTimeType {
ValueRef::Null => Ok(None),
ValueRef::DateTime(v) => Ok(Some(v)),
other => error::CastTypeSnafu {
- msg: format!("Failed to cast value {:?} to DateTime", other,),
+ msg: format!("Failed to cast value {other:?} to DateTime"),
}
.fail(),
}
diff --git a/src/datatypes/src/types/null_type.rs b/src/datatypes/src/types/null_type.rs
index b9bb2dc7526d..21a008e1da2a 100644
--- a/src/datatypes/src/types/null_type.rs
+++ b/src/datatypes/src/types/null_type.rs
@@ -49,7 +49,7 @@ impl DataType for NullType {
}
fn create_mutable_vector(&self, _capacity: usize) -> Box<dyn MutableVector> {
- Box::new(NullVectorBuilder::default())
+ Box::<NullVectorBuilder>::default()
}
fn is_timestamp_compatible(&self) -> bool {
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 7201ffbac48d..3adbd2c6f342 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -70,31 +70,31 @@ impl Display for Value {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Value::Null => write!(f, "{}", self.data_type().name()),
- Value::Boolean(v) => write!(f, "{}", v),
- Value::UInt8(v) => write!(f, "{}", v),
- Value::UInt16(v) => write!(f, "{}", v),
- Value::UInt32(v) => write!(f, "{}", v),
- Value::UInt64(v) => write!(f, "{}", v),
- Value::Int8(v) => write!(f, "{}", v),
- Value::Int16(v) => write!(f, "{}", v),
- Value::Int32(v) => write!(f, "{}", v),
- Value::Int64(v) => write!(f, "{}", v),
- Value::Float32(v) => write!(f, "{}", v),
- Value::Float64(v) => write!(f, "{}", v),
+ Value::Boolean(v) => write!(f, "{v}"),
+ Value::UInt8(v) => write!(f, "{v}"),
+ Value::UInt16(v) => write!(f, "{v}"),
+ Value::UInt32(v) => write!(f, "{v}"),
+ Value::UInt64(v) => write!(f, "{v}"),
+ Value::Int8(v) => write!(f, "{v}"),
+ Value::Int16(v) => write!(f, "{v}"),
+ Value::Int32(v) => write!(f, "{v}"),
+ Value::Int64(v) => write!(f, "{v}"),
+ Value::Float32(v) => write!(f, "{v}"),
+ Value::Float64(v) => write!(f, "{v}"),
Value::String(v) => write!(f, "{}", v.as_utf8()),
Value::Binary(v) => {
let hex = v
.iter()
- .map(|b| format!("{:02x}", b))
+ .map(|b| format!("{b:02x}"))
.collect::<Vec<String>>()
.join("");
- write!(f, "{}", hex)
+ write!(f, "{hex}")
}
- Value::Date(v) => write!(f, "{}", v),
- Value::DateTime(v) => write!(f, "{}", v),
+ Value::Date(v) => write!(f, "{v}"),
+ Value::DateTime(v) => write!(f, "{v}"),
Value::Timestamp(v) => write!(f, "{}", v.to_iso8601_string()),
Value::List(v) => {
- let default = Box::new(vec![]);
+ let default = Box::<Vec<Value>>::default();
let items = v.items().as_ref().unwrap_or(&default);
let items = items
.iter()
@@ -146,7 +146,7 @@ impl Value {
Value::Null => Ok(None),
Value::List(v) => Ok(Some(v)),
other => error::CastTypeSnafu {
- msg: format!("Failed to cast {:?} to list value", other),
+ msg: format!("Failed to cast {other:?} to list value"),
}
.fail(),
}
@@ -214,8 +214,7 @@ impl Value {
output_type_id == value_type_id || self.is_null(),
error::ToScalarValueSnafu {
reason: format!(
- "expect value to return output_type {:?}, actual: {:?}",
- output_type_id, value_type_id,
+ "expect value to return output_type {output_type_id:?}, actual: {value_type_id:?}",
),
}
);
@@ -1345,7 +1344,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
- Some(Box::new(vec![])),
+ Some(Box::default()),
ConcreteDataType::timestamp_second_datatype(),
))
.to_string(),
@@ -1353,7 +1352,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
- Some(Box::new(vec![])),
+ Some(Box::default()),
ConcreteDataType::timestamp_millisecond_datatype(),
))
.to_string(),
@@ -1361,7 +1360,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
- Some(Box::new(vec![])),
+ Some(Box::default()),
ConcreteDataType::timestamp_microsecond_datatype(),
))
.to_string(),
@@ -1369,7 +1368,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
- Some(Box::new(vec![])),
+ Some(Box::default()),
ConcreteDataType::timestamp_nanosecond_datatype(),
))
.to_string(),
diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs
index 3b5defc8ec6e..b2756294c375 100644
--- a/src/datatypes/src/vectors/binary.rs
+++ b/src/datatypes/src/vectors/binary.rs
@@ -252,7 +252,7 @@ mod tests {
#[test]
fn test_serialize_binary_vector_to_json() {
- let vector = BinaryVector::from(BinaryArray::from_iter_values(&[
+ let vector = BinaryVector::from(BinaryArray::from_iter_values([
vec![1, 2, 3],
vec![1, 2, 3],
]));
@@ -281,7 +281,7 @@ mod tests {
#[test]
fn test_from_arrow_array() {
- let arrow_array = BinaryArray::from_iter_values(&[vec![1, 2, 3], vec![1, 2, 3]]);
+ let arrow_array = BinaryArray::from_iter_values([vec![1, 2, 3], vec![1, 2, 3]]);
let original = BinaryArray::from(arrow_array.data().clone());
let vector = BinaryVector::from(arrow_array);
assert_eq!(original, vector.array);
diff --git a/src/datatypes/src/vectors/boolean.rs b/src/datatypes/src/vectors/boolean.rs
index 2b4e5b8e10d9..facbc2cfc624 100644
--- a/src/datatypes/src/vectors/boolean.rs
+++ b/src/datatypes/src/vectors/boolean.rs
@@ -296,7 +296,7 @@ mod tests {
let vec = BooleanVector::from(input.clone());
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
- assert_eq!(Some(v), vec.get_data(i), "failed at {}", i)
+ assert_eq!(Some(v), vec.get_data(i), "Failed at {i}")
}
}
@@ -306,7 +306,7 @@ mod tests {
let vec = input.iter().collect::<BooleanVector>();
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
- assert_eq!(v, vec.get_data(i), "failed at {}", i)
+ assert_eq!(v, vec.get_data(i), "Failed at {i}")
}
}
@@ -316,7 +316,7 @@ mod tests {
let vec = BooleanVector::from(input.clone());
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
- assert_eq!(v, vec.get_data(i), "failed at {}", i)
+ assert_eq!(v, vec.get_data(i), "failed at {i}")
}
}
diff --git a/src/datatypes/src/vectors/constant.rs b/src/datatypes/src/vectors/constant.rs
index 87739e91318b..825d1dc7bd82 100644
--- a/src/datatypes/src/vectors/constant.rs
+++ b/src/datatypes/src/vectors/constant.rs
@@ -203,7 +203,7 @@ mod tests {
let a = Int32Vector::from_slice(vec![1]);
let c = ConstantVector::new(Arc::new(a), 10);
- let s = format!("{:?}", c);
+ let s = format!("{c:?}");
assert_eq!(s, "ConstantVector([Int32(1); 10])");
}
diff --git a/src/datatypes/src/vectors/datetime.rs b/src/datatypes/src/vectors/datetime.rs
index a40a3e54d330..524ada886981 100644
--- a/src/datatypes/src/vectors/datetime.rs
+++ b/src/datatypes/src/vectors/datetime.rs
@@ -37,7 +37,7 @@ mod tests {
#[test]
fn test_datetime_vector() {
- let v = DateTimeVector::new(PrimitiveArray::from_slice(&[1, 2, 3]));
+ let v = DateTimeVector::new(PrimitiveArray::from_slice([1, 2, 3]));
assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
assert_eq!(3, v.len());
assert_eq!("DateTimeVector", v.vector_type_name());
diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs
index e3a2eaaa583c..cd04eae6438c 100644
--- a/src/datatypes/src/vectors/helper.rs
+++ b/src/datatypes/src/vectors/helper.rs
@@ -205,7 +205,7 @@ impl Helper {
| ScalarValue::Time64Microsecond(_)
| ScalarValue::Time64Nanosecond(_) => {
return error::ConversionSnafu {
- from: format!("Unsupported scalar value: {}", value),
+ from: format!("Unsupported scalar value: {value}"),
}
.fail()
}
diff --git a/src/datatypes/src/vectors/list.rs b/src/datatypes/src/vectors/list.rs
index 747e03557ba2..3e9b3637b8ad 100644
--- a/src/datatypes/src/vectors/list.rs
+++ b/src/datatypes/src/vectors/list.rs
@@ -157,10 +157,7 @@ impl From<ListArray> for ListVector {
fn from(array: ListArray) -> Self {
let item_type = ConcreteDataType::from_arrow_type(match array.data_type() {
ArrowDataType::List(field) => field.data_type(),
- other => panic!(
- "Try to create ListVector from an arrow array with type {:?}",
- other
- ),
+ other => panic!("Try to create ListVector from an arrow array with type {other:?}"),
});
Self { array, item_type }
}
diff --git a/src/datatypes/src/vectors/null.rs b/src/datatypes/src/vectors/null.rs
index bb66e09b392b..7f6d3fbeb1fd 100644
--- a/src/datatypes/src/vectors/null.rs
+++ b/src/datatypes/src/vectors/null.rs
@@ -167,7 +167,7 @@ impl MutableVector for NullVectorBuilder {
ensure!(
value.is_null(),
error::CastTypeSnafu {
- msg: format!("Failed to cast value ref {:?} to null", value),
+ msg: format!("Failed to cast value ref {value:?} to null"),
}
);
@@ -243,7 +243,7 @@ mod tests {
#[test]
fn test_debug_null_vector() {
let array = NullVector::new(1024 * 1024);
- assert_eq!(format!("{:?}", array), "NullVector(1048576)");
+ assert_eq!(format!("{array:?}"), "NullVector(1048576)");
}
#[test]
diff --git a/src/datatypes/src/vectors/operations/filter.rs b/src/datatypes/src/vectors/operations/filter.rs
index 8368a6afb4c4..d921a67bb20a 100644
--- a/src/datatypes/src/vectors/operations/filter.rs
+++ b/src/datatypes/src/vectors/operations/filter.rs
@@ -45,11 +45,11 @@ mod tests {
};
fn check_filter_primitive(expect: &[i32], input: &[i32], filter: &[bool]) {
- let v = Int32Vector::from_slice(&input);
+ let v = Int32Vector::from_slice(input);
let filter = BooleanVector::from_slice(filter);
let out = v.filter(&filter).unwrap();
- let expect: VectorRef = Arc::new(Int32Vector::from_slice(&expect));
+ let expect: VectorRef = Arc::new(Int32Vector::from_slice(expect));
assert_eq!(expect, out);
}
diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs
index 7829c3173131..42bee1e33921 100644
--- a/src/datatypes/src/vectors/primitive.rs
+++ b/src/datatypes/src/vectors/primitive.rs
@@ -365,7 +365,7 @@ pub(crate) fn replicate_primitive<T: LogicalPrimitiveType>(
return vector.get_slice(0, 0);
}
- let mut builder = PrimitiveVectorBuilder::<T>::with_capacity(*offsets.last().unwrap() as usize);
+ let mut builder = PrimitiveVectorBuilder::<T>::with_capacity(*offsets.last().unwrap());
let mut previous_offset = 0;
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 0c66980334ea..0cfd453f7e26 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -291,7 +291,7 @@ impl SchemaProvider for FrontendSchemaProvider {
}
Some(r) => r,
};
- let val = TableGlobalValue::from_bytes(&res.1).context(InvalidCatalogValueSnafu)?;
+ let val = TableGlobalValue::from_bytes(res.1).context(InvalidCatalogValueSnafu)?;
let table = Arc::new(DistTable::new(
table_name,
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 19d49a611d92..b14bb136eb48 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -643,7 +643,7 @@ impl GrpcQueryHandler for Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", insert_expr),
+ query: format!("{insert_expr:?}"),
})?;
let object_result = match output {
Output::AffectedRows(rows) => ObjectResultBuilder::default()
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 8205a8cd0ec7..31657c0f296c 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -241,12 +241,12 @@ impl DistInstance {
.schema(schema_name)
.context(CatalogSnafu)?
.context(SchemaNotFoundSnafu {
- schema_info: format!("{}.{}", catalog_name, schema_name),
+ schema_info: format!("{catalog_name}.{schema_name}"),
})?
.table(table_name)
.context(CatalogSnafu)?
.context(TableNotFoundSnafu {
- table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
+ table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
})?;
let dist_table = table
@@ -392,7 +392,7 @@ impl GrpcAdminHandler for DistInstance {
}
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu {
- query: format!("{:?}", query),
+ query: format!("{query:?}"),
})
}
}
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index 89cb869fc5e9..b52f37aabb2e 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -51,7 +51,7 @@ impl Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", data_point),
+ query: format!("{data_point:?}"),
})?;
Ok(())
}
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index d1a848e158eb..fde5e0b43cd2 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -54,8 +54,7 @@ fn negotiate_response_type(accepted_response_types: &[i32]) -> ServerResult<Resp
.find(|t| is_supported(**t))
.with_context(|| error::NotSupportedSnafu {
feat: format!(
- "server does not support any of the requested response types: {:?}",
- accepted_response_types
+ "server does not support any of the requested response types: {accepted_response_types:?}",
),
})?;
@@ -131,7 +130,7 @@ impl PrometheusProtocolHandler for Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| error::ExecuteInsertSnafu {
- msg: format!("{:?}", request),
+ msg: format!("{request:?}"),
})?;
}
Mode::Distributed => {
@@ -139,7 +138,7 @@ impl PrometheusProtocolHandler for Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| error::ExecuteInsertSnafu {
- msg: format!("{:?}", request),
+ msg: format!("{request:?}"),
})?;
}
}
diff --git a/src/frontend/src/spliter.rs b/src/frontend/src/spliter.rs
index f70116b69e25..d7753d434c20 100644
--- a/src/frontend/src/spliter.rs
+++ b/src/frontend/src/spliter.rs
@@ -66,7 +66,7 @@ impl WriteSpliter {
{
Ok(region_id) => region_id,
Err(e) => {
- let reason = format!("{:?}", e);
+ let reason = format!("{e:?}");
return FindRegionSnafu { reason }.fail();
}
};
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 9b26dc2d0e1e..b7291dc9b682 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -162,9 +162,9 @@ impl DistTable {
filters: &[Expr],
) -> Result<Vec<RegionNumber>> {
let regions = if let Some((first, rest)) = filters.split_first() {
- let mut target = self.find_regions0(partition_rule.clone(), first)?;
+ let mut target = Self::find_regions0(partition_rule.clone(), first)?;
for filter in rest {
- let regions = self.find_regions0(partition_rule.clone(), filter)?;
+ let regions = Self::find_regions0(partition_rule.clone(), filter)?;
// When all filters are provided as a collection, it often implicitly states that
// "all filters must be satisfied". So we join all the results here.
@@ -193,7 +193,6 @@ impl DistTable {
// - expr with arithmetic like "a + 1 < 10" (should have been optimized in logic plan?)
// - not comparison or neither "AND" nor "OR" operations, for example, "a LIKE x"
fn find_regions0(
- &self,
partition_rule: PartitionRuleRef<Error>,
filter: &Expr,
) -> Result<HashSet<RegionNumber>> {
@@ -222,9 +221,9 @@ impl DistTable {
if matches!(op, Operator::And | Operator::Or) =>
{
let left_regions =
- self.find_regions0(partition_rule.clone(), &(*left.clone()).into())?;
+ Self::find_regions0(partition_rule.clone(), &(*left.clone()).into())?;
let right_regions =
- self.find_regions0(partition_rule.clone(), &(*right.clone()).into())?;
+ Self::find_regions0(partition_rule.clone(), &(*right.clone()).into())?;
let regions = match op {
Operator::And => left_regions
.intersection(&right_regions)
diff --git a/src/frontend/src/table/route.rs b/src/frontend/src/table/route.rs
index eaeb3e14c010..8099d43a30da 100644
--- a/src/frontend/src/table/route.rs
+++ b/src/frontend/src/table/route.rs
@@ -44,7 +44,7 @@ impl TableRoutes {
.await
.map_err(|e| {
error::GetCacheSnafu {
- err_msg: format!("{:?}", e),
+ err_msg: format!("{e:?}"),
}
.build()
})
diff --git a/src/frontend/src/table/scan.rs b/src/frontend/src/table/scan.rs
index 3d9f623aeb37..e0f3dc78d56c 100644
--- a/src/frontend/src/table/scan.rs
+++ b/src/frontend/src/table/scan.rs
@@ -82,7 +82,7 @@ impl DatanodeInstance {
let table_provider = Arc::new(DfTableProviderAdapter::new(self.table.clone()));
let mut builder = LogicalPlanBuilder::scan_with_filters(
- &table_scan.table_name.to_string(),
+ table_scan.table_name.to_string(),
Arc::new(DefaultTableSource::new(table_provider)),
table_scan.projection.clone(),
table_scan
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 37292458f369..d6a074373bcf 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -59,8 +59,8 @@ pub(crate) async fn create_frontend_instance(test_name: &str) -> (Arc<Instance>,
}
fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard) {
- let wal_tmp_dir = TempDir::new(&format!("gt_wal_{}", name)).unwrap();
- let data_tmp_dir = TempDir::new(&format!("gt_data_{}", name)).unwrap();
+ let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
+ let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
let opts = DatanodeOptions {
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
storage: ObjectStoreConfig::File {
@@ -138,8 +138,8 @@ async fn create_dist_datanode_instance(
meta_srv: MockInfo,
) -> Arc<DatanodeInstance> {
let current = common_time::util::current_time_millis();
- let wal_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-wal-{}", current)).unwrap();
- let data_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-data-{}", current)).unwrap();
+ let wal_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-wal-{current}")).unwrap();
+ let data_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-data-{current}")).unwrap();
let opts = DatanodeOptions {
node_id: Some(datanode_id),
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
diff --git a/src/log-store/src/fs/chunk.rs b/src/log-store/src/fs/chunk.rs
index d123684cd9f2..a59b34e55762 100644
--- a/src/log-store/src/fs/chunk.rs
+++ b/src/log-store/src/fs/chunk.rs
@@ -145,7 +145,7 @@ impl Buffer for ChunkList {
}
left -= actual;
} else {
- panic!("Advance step [{}] exceeds max readable bytes", by);
+ panic!("Advance step [{by}] exceeds max readable bytes");
}
}
}
diff --git a/src/log-store/src/fs/file.rs b/src/log-store/src/fs/file.rs
index 163cbe22d6dc..132fbd337e79 100644
--- a/src/log-store/src/fs/file.rs
+++ b/src/log-store/src/fs/file.rs
@@ -653,7 +653,7 @@ fn read_at(file: &Arc<File>, offset: usize, file_length: usize) -> Result<Chunk>
if offset > file_length {
return Err(Eof);
}
- let size = CHUNK_SIZE.min((file_length - offset) as usize);
+ let size = CHUNK_SIZE.min(file_length - offset);
let mut data = Box::new([0u8; CHUNK_SIZE]);
crate::fs::io::pread_exact(file.as_ref(), &mut data[0..size], offset as u64)?;
Ok(Chunk::new(data, size))
@@ -684,7 +684,7 @@ mod tests {
let mut file = LogFile::open(path.clone(), &config)
.await
- .unwrap_or_else(|_| panic!("Failed to open file: {}", path));
+ .unwrap_or_else(|_| panic!("Failed to open file: {path}"));
file.start().await.expect("Failed to start log file");
assert_eq!(
@@ -873,7 +873,7 @@ mod tests {
let mut file = LogFile::open(path.clone(), &config)
.await
- .unwrap_or_else(|_| panic!("Failed to open file: {}", path));
+ .unwrap_or_else(|_| panic!("Failed to open file: {path}"));
let state = file.state.clone();
file.start().await.unwrap();
diff --git a/src/log-store/src/fs/file_name.rs b/src/log-store/src/fs/file_name.rs
index b4b04968f06d..555513803948 100644
--- a/src/log-store/src/fs/file_name.rs
+++ b/src/log-store/src/fs/file_name.rs
@@ -97,13 +97,13 @@ mod tests {
#[test]
pub fn test_padding_file_name() {
let id = u64::MIN;
- assert_eq!("00000000000000000000", format!("{:020}", id));
+ assert_eq!("00000000000000000000", format!("{id:020}"));
let id = 123u64;
- assert_eq!("00000000000000000123", format!("{:020}", id));
+ assert_eq!("00000000000000000123", format!("{id:020}"));
let id = 123123123123u64;
- assert_eq!("00000000123123123123", format!("{:020}", id));
+ assert_eq!("00000000123123123123", format!("{id:020}"));
let id = u64::MAX;
- assert_eq!(u64::MAX.to_string(), format!("{:020}", id));
+ assert_eq!(u64::MAX.to_string(), format!("{id:020}"));
}
#[test]
diff --git a/src/log-store/src/fs/io/unix.rs b/src/log-store/src/fs/io/unix.rs
index 09dc2bf9218d..f0936ada2cf2 100644
--- a/src/log-store/src/fs/io/unix.rs
+++ b/src/log-store/src/fs/io/unix.rs
@@ -20,9 +20,9 @@ use snafu::ResultExt;
use crate::error::{Error, IoSnafu};
pub fn pread_exact(file: &File, buf: &mut [u8], offset: u64) -> Result<(), Error> {
- file.read_exact_at(buf, offset as u64).context(IoSnafu)
+ file.read_exact_at(buf, offset).context(IoSnafu)
}
pub fn pwrite_all(file: &File, buf: &[u8], offset: u64) -> Result<(), Error> {
- file.write_all_at(buf, offset as u64).context(IoSnafu)
+ file.write_all_at(buf, offset).context(IoSnafu)
}
diff --git a/src/log-store/src/fs/log.rs b/src/log-store/src/fs/log.rs
index 38c8dc285f22..96576b330d62 100644
--- a/src/log-store/src/fs/log.rs
+++ b/src/log-store/src/fs/log.rs
@@ -93,8 +93,7 @@ impl LocalFileLogStore {
Arc::get_mut(active_file)
.with_context(|| InternalSnafu {
msg: format!(
- "Concurrent modification on log store {} start is not allowed",
- active_file_name
+ "Concurrent modification on log store {active_file_name} start is not allowed"
),
})?
.start()
@@ -143,9 +142,9 @@ impl LocalFileLogStore {
let file = LogFile::open(path, config).await?;
info!("Load log store file {}: {:?}", start_id, file);
if map.contains_key(&start_id) {
- error!("Log file with start entry id: {} already exists", start_id);
+ error!("Log file with start entry id: {start_id} already exists");
return DuplicateFileSnafu {
- msg: format!("File with start id: {} duplicates on start", start_id),
+ msg: format!("File with start id: {start_id} duplicates on start"),
}
.fail();
}
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 1c88c832c1d4..f85fec8b4cf1 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -326,7 +326,7 @@ mod tests {
async fn gen_data(&self) {
for i in 0..10 {
let req = PutRequest::new()
- .with_key(self.key(&format!("key-{}", i)))
+ .with_key(self.key(&format!("key-{i}")))
.with_value(format!("{}-{}", "value", i).into_bytes())
.with_prev_kv();
let res = self.client.put(req).await;
@@ -547,7 +547,7 @@ mod tests {
let kvs = res.unwrap().take_kvs();
assert_eq!(10, kvs.len());
for (i, mut kv) in kvs.into_iter().enumerate() {
- assert_eq!(tc.key(&format!("key-{}", i)), kv.take_key());
+ assert_eq!(tc.key(&format!("key-{i}")), kv.take_key());
assert_eq!(format!("{}-{}", "value", i).into_bytes(), kv.take_value());
}
}
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index 6add27c86bcf..0f6bf7a22d6b 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -31,7 +31,7 @@ pub(crate) const TABLE_ROUTE_PREFIX: &str = "__meta_table_route";
lazy_static! {
static ref DATANODE_KEY_PATTERN: Regex =
- Regex::new(&format!("^{}-([0-9]+)-([0-9]+)$", DN_LEASE_PREFIX)).unwrap();
+ Regex::new(&format!("^{DN_LEASE_PREFIX}-([0-9]+)-([0-9]+)$")).unwrap();
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LeaseKey {
@@ -52,10 +52,10 @@ impl FromStr for LeaseKey {
let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
- err_msg: format!("invalid cluster_id: {}", cluster_id),
+ err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
- err_msg: format!("invalid node_id: {}", node_id),
+ err_msg: format!("invalid node_id: {node_id}"),
})?;
Ok(Self {
@@ -118,7 +118,7 @@ impl TryFrom<LeaseValue> for Vec<u8> {
fn try_from(dn_value: LeaseValue) -> Result<Self> {
Ok(serde_json::to_string(&dn_value)
.context(error::SerializeToJsonSnafu {
- input: format!("{:?}", dn_value),
+ input: format!("{dn_value:?}"),
})?
.into_bytes())
}
diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs
index 41a9b776781f..570749a765fc 100644
--- a/src/meta-srv/src/lease.rs
+++ b/src/meta-srv/src/lease.rs
@@ -53,5 +53,5 @@ where
#[inline]
pub fn get_lease_prefix(cluster_id: u64) -> Vec<u8> {
- format!("{}-{}", DN_LEASE_PREFIX, cluster_id).into_bytes()
+ format!("{DN_LEASE_PREFIX}-{cluster_id}").into_bytes()
}
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index e3882478f4e8..331b6b3b1059 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -100,7 +100,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
pusher_key.as_ref().unwrap_or(&"unknow".to_string())
);
if let Some(key) = pusher_key {
- let _ = handler_group.unregister(&key);
+ let _ = handler_group.unregister(&key).await;
}
});
diff --git a/src/meta-srv/src/service/router.rs b/src/meta-srv/src/service/router.rs
index 1162b34dac88..18cf40df43fb 100644
--- a/src/meta-srv/src/service/router.rs
+++ b/src/meta-srv/src/service/router.rs
@@ -164,7 +164,7 @@ async fn handle_delete(req: DeleteRequest, ctx: Context) -> Result<RouteResponse
let tgv = get_table_global_value(&ctx.kv_store, &tgk)
.await?
.with_context(|| error::TableNotFoundSnafu {
- name: format!("{}", tgk),
+ name: format!("{tgk}"),
})?;
let trk = TableRouteKey::with_table_global_key(tgv.table_id() as u64, &tgk);
let (_, trv) = remove_table_route_value(&ctx.kv_store, &trk).await?;
@@ -272,11 +272,11 @@ async fn get_table_global_value(
kv_store: &KvStoreRef,
key: &TableGlobalKey,
) -> Result<Option<TableGlobalValue>> {
- let tg_key = format!("{}", key).into_bytes();
+ let tg_key = format!("{key}").into_bytes();
let tv = get_from_store(kv_store, tg_key).await?;
match tv {
Some(tv) => {
- let tv = TableGlobalValue::from_bytes(&tv).context(error::InvalidCatalogValueSnafu)?;
+ let tv = TableGlobalValue::from_bytes(tv).context(error::InvalidCatalogValueSnafu)?;
Ok(Some(tv))
}
None => Ok(None),
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index 2c2f0926177e..cab3d527dc86 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -49,7 +49,7 @@ const INIT_TABLE_VERSION: TableVersion = 0;
/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
#[inline]
fn region_name(table_id: TableId, n: u32) -> String {
- format!("{}_{:010}", table_id, n)
+ format!("{table_id}_{n:010}")
}
#[inline]
@@ -59,7 +59,7 @@ fn region_id(table_id: TableId, n: u32) -> RegionId {
#[inline]
fn table_dir(schema_name: &str, table_id: TableId) -> String {
- format!("{}/{}/", schema_name, table_id)
+ format!("{schema_name}/{table_id}/")
}
/// [TableEngine] implementation.
@@ -109,15 +109,15 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
Ok(self.inner.alter_table(ctx, req).await?)
}
- fn get_table<'a>(
+ fn get_table(
&self,
_ctx: &EngineContext,
- table_ref: &'a TableReference,
+ table_ref: &TableReference,
) -> TableResult<Option<TableRef>> {
Ok(self.inner.get_table(table_ref))
}
- fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
+ fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
self.inner.get_table(table_ref).is_some()
}
@@ -292,7 +292,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
return Ok(table);
} else {
return TableExistsSnafu {
- table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
+ table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
}
.fail();
}
@@ -459,7 +459,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
Ok(table)
}
- fn get_table<'a>(&self, table_ref: &'a TableReference) -> Option<TableRef> {
+ fn get_table(&self, table_ref: &TableReference) -> Option<TableRef> {
self.tables
.read()
.unwrap()
@@ -876,7 +876,7 @@ mod tests {
let result = table_engine.create_table(&ctx, request).await;
assert!(result.is_err());
- assert!(matches!(result, Err(e) if format!("{:?}", e).contains("Table already exists")));
+ assert!(matches!(result, Err(e) if format!("{e:?}").contains("Table already exists")));
}
#[tokio::test]
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index 463e7c866ef1..74488d3f409e 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -56,7 +56,7 @@ use crate::manifest::TableManifest;
#[inline]
fn table_manifest_dir(table_dir: &str) -> String {
- format!("{}/manifest/", table_dir)
+ format!("{table_dir}/manifest/")
}
/// [Table] implementation.
@@ -284,7 +284,7 @@ impl Stream for ChunkStream {
#[inline]
fn column_qualified_name(table_name: &str, region_name: &str, column_name: &str) -> String {
- format!("{}.{}.{}", table_name, region_name, column_name)
+ format!("{table_name}.{region_name}.{column_name}")
}
impl<R: Region> MitoTable<R> {
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 79c3bc7938b1..21c4cc767e35 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -281,7 +281,7 @@ mod tests {
// TODO(sunng87): do not rely on to_string for compare
assert_eq!(
- format!("{:?}", plan),
+ format!("{plan:?}"),
r#"DfPlan(Limit: skip=0, fetch=20
Projection: SUM(numbers.number)
Aggregate: groupBy=[[]], aggr=[[SUM(numbers.number)]]
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index 2e6658876933..513d96e5ba26 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -34,11 +34,7 @@ use datatypes::arrow::datatypes::DataType;
pub struct TypeConversionRule;
impl OptimizerRule for TypeConversionRule {
- fn optimize(
- &self,
- plan: &LogicalPlan,
- optimizer_config: &mut OptimizerConfig,
- ) -> Result<LogicalPlan> {
+ fn optimize(&self, plan: &LogicalPlan, _config: &mut OptimizerConfig) -> Result<LogicalPlan> {
let mut converter = TypeConverter {
schemas: plan.all_schemas(),
};
@@ -46,7 +42,7 @@ impl OptimizerRule for TypeConversionRule {
match plan {
LogicalPlan::Filter(filter) => Ok(LogicalPlan::Filter(Filter::try_new(
filter.predicate().clone().rewrite(&mut converter)?,
- Arc::new(self.optimize(filter.input(), optimizer_config)?),
+ Arc::new(self.optimize(filter.input(), _config)?),
)?)),
LogicalPlan::TableScan(TableScan {
table_name,
@@ -92,7 +88,7 @@ impl OptimizerRule for TypeConversionRule {
let inputs = plan.inputs();
let new_inputs = inputs
.iter()
- .map(|plan| self.optimize(plan, optimizer_config))
+ .map(|plan| self.optimize(plan, _config))
.collect::<Result<Vec<_>>>()?;
let expr = plan
@@ -175,8 +171,7 @@ impl<'a> TypeConverter<'a> {
let casted_right = Self::cast_scalar_value(value, left_type)?;
if casted_right.is_null() {
return Err(DataFusionError::Plan(format!(
- "column:{:?} value:{:?} is invalid",
- col, value
+ "column:{col:?} value:{value:?} is invalid",
)));
}
if reverse {
diff --git a/src/query/tests/argmax_test.rs b/src/query/tests/argmax_test.rs
index cbf1ae931dc9..88dbde83e416 100644
--- a/src/query/tests/argmax_test.rs
+++ b/src/query/tests/argmax_test.rs
@@ -82,10 +82,7 @@ async fn execute_argmax<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
- let sql = format!(
- "select ARGMAX({}) as argmax from {}",
- column_name, table_name
- );
+ let sql = format!("select ARGMAX({column_name}) as argmax from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();
diff --git a/src/query/tests/argmin_test.rs b/src/query/tests/argmin_test.rs
index 546fa9ae23f3..2655a8db17e6 100644
--- a/src/query/tests/argmin_test.rs
+++ b/src/query/tests/argmin_test.rs
@@ -82,10 +82,7 @@ async fn execute_argmin<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
- let sql = format!(
- "select argmin({}) as argmin from {}",
- column_name, table_name
- );
+ let sql = format!("select argmin({column_name}) as argmin from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();
diff --git a/src/query/tests/function.rs b/src/query/tests/function.rs
index 7de93a6265ec..3a0bad335168 100644
--- a/src/query/tests/function.rs
+++ b/src/query/tests/function.rs
@@ -81,7 +81,7 @@ pub async fn get_numbers_from_table<'s, T>(
where
T: WrapperType,
{
- let sql = format!("SELECT {} FROM {}", column_name, table_name);
+ let sql = format!("SELECT {column_name} FROM {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();
diff --git a/src/query/tests/mean_test.rs b/src/query/tests/mean_test.rs
index 000323fb2192..56bf327339d1 100644
--- a/src/query/tests/mean_test.rs
+++ b/src/query/tests/mean_test.rs
@@ -78,7 +78,7 @@ async fn execute_mean<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
- let sql = format!("select MEAN({}) as mean from {}", column_name, table_name);
+ let sql = format!("select MEAN({column_name}) as mean from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();
diff --git a/src/query/tests/my_sum_udaf_example.rs b/src/query/tests/my_sum_udaf_example.rs
index 54d3a62a5b98..06adca86c1fc 100644
--- a/src/query/tests/my_sum_udaf_example.rs
+++ b/src/query/tests/my_sum_udaf_example.rs
@@ -217,10 +217,7 @@ where
Arc::new(|| Arc::new(MySumAccumulatorCreator::default())),
)));
- let sql = format!(
- "select MY_SUM({}) as my_sum from {}",
- column_name, table_name
- );
+ let sql = format!("select MY_SUM({column_name}) as my_sum from {table_name}");
let plan = engine.sql_to_plan(&sql, Arc::new(QueryContext::new()))?;
let output = engine.execute(&plan).await?;
diff --git a/src/query/tests/percentile_test.rs b/src/query/tests/percentile_test.rs
index e639d4b3e63f..724d80c663df 100644
--- a/src/query/tests/percentile_test.rs
+++ b/src/query/tests/percentile_test.rs
@@ -85,7 +85,7 @@ where
let expected_value = numbers.iter().map(|&n| n.as_()).collect::<Vec<f64>>();
let expected_value: inc_stats::Percentiles<f64> = expected_value.iter().cloned().collect();
- let expected_value = expected_value.percentile(&0.5).unwrap();
+ let expected_value = expected_value.percentile(0.5).unwrap();
assert_eq!(value, expected_value.into());
Ok(())
}
@@ -95,10 +95,7 @@ async fn execute_percentile<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
- let sql = format!(
- "select PERCENTILE({},50.0) as percentile from {}",
- column_name, table_name
- );
+ let sql = format!("select PERCENTILE({column_name},50.0) as percentile from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();
diff --git a/src/query/tests/polyval_test.rs b/src/query/tests/polyval_test.rs
index 248c0d42d74e..d174d20ec0c3 100644
--- a/src/query/tests/polyval_test.rs
+++ b/src/query/tests/polyval_test.rs
@@ -78,10 +78,7 @@ async fn execute_polyval<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
- let sql = format!(
- "select POLYVAL({}, 0) as polyval from {}",
- column_name, table_name
- );
+ let sql = format!("select POLYVAL({column_name}, 0) as polyval from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();
diff --git a/src/query/tests/scipy_stats_norm_cdf_test.rs b/src/query/tests/scipy_stats_norm_cdf_test.rs
index dee8f5c87ee3..0c8f50251880 100644
--- a/src/query/tests/scipy_stats_norm_cdf_test.rs
+++ b/src/query/tests/scipy_stats_norm_cdf_test.rs
@@ -76,8 +76,7 @@ async fn execute_scipy_stats_norm_cdf<'a>(
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
- "select SCIPYSTATSNORMCDF({},2.0) as scipy_stats_norm_cdf from {}",
- column_name, table_name
+ "select SCIPYSTATSNORMCDF({column_name},2.0) as scipy_stats_norm_cdf from {table_name}",
);
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
diff --git a/src/query/tests/scipy_stats_norm_pdf.rs b/src/query/tests/scipy_stats_norm_pdf.rs
index 03e4cf129220..1142db436483 100644
--- a/src/query/tests/scipy_stats_norm_pdf.rs
+++ b/src/query/tests/scipy_stats_norm_pdf.rs
@@ -76,8 +76,7 @@ async fn execute_scipy_stats_norm_pdf<'a>(
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
- "select SCIPYSTATSNORMPDF({},2.0) as scipy_stats_norm_pdf from {}",
- column_name, table_name
+ "select SCIPYSTATSNORMPDF({column_name},2.0) as scipy_stats_norm_pdf from {table_name}"
);
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
diff --git a/src/script/src/lib.rs b/src/script/src/lib.rs
index 43db30b5d47e..adb949918c20 100644
--- a/src/script/src/lib.rs
+++ b/src/script/src/lib.rs
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![feature(iterator_try_reduce)]
pub mod engine;
pub mod error;
#[cfg(feature = "python")]
diff --git a/src/script/src/python/builtins/mod.rs b/src/script/src/python/builtins/mod.rs
index 4cd52cc609f6..f23cd63ad07b 100644
--- a/src/script/src/python/builtins/mod.rs
+++ b/src/script/src/python/builtins/mod.rs
@@ -145,7 +145,7 @@ fn try_into_py_obj(col: DFColValue, vm: &VirtualMachine) -> PyResult<PyObjectRef
DFColValue::Array(arr) => {
let ret = PyVector::from(
HelperVec::try_into_vector(arr)
- .map_err(|err| vm.new_type_error(format!("Unsupported type: {:#?}", err)))?,
+ .map_err(|err| vm.new_type_error(format!("Unsupported type: {err:#?}")))?,
)
.into_pyobject(vm);
Ok(ret)
@@ -319,13 +319,11 @@ pub(crate) mod greptime_builtin {
let func: Option<FunctionRef> = FUNCTION_REGISTRY.get_function(name);
let res = match func {
Some(f) => f.eval(Default::default(), &v),
- None => return Err(vm.new_type_error(format!("Can't find function {}", name))),
+ None => return Err(vm.new_type_error(format!("Can't find function {name}"))),
};
match res {
Ok(v) => Ok(v.into()),
- Err(err) => {
- Err(vm.new_runtime_error(format!("Fail to evaluate the function,: {}", err)))
- }
+ Err(err) => Err(vm.new_runtime_error(format!("Fail to evaluate the function,: {err}"))),
}
}
@@ -338,26 +336,24 @@ pub(crate) mod greptime_builtin {
let func = FUNCTION_REGISTRY.get_aggr_function(name);
let f = match func {
Some(f) => f.create().creator(),
- None => return Err(vm.new_type_error(format!("Can't find function {}", name))),
+ None => return Err(vm.new_type_error(format!("Can't find function {name}"))),
};
let types: Vec<_> = v.iter().map(|v| v.data_type()).collect();
let acc = f(&types);
let mut acc = match acc {
Ok(acc) => acc,
Err(err) => {
- return Err(vm.new_runtime_error(format!("Failed to create accumulator: {}", err)))
+ return Err(vm.new_runtime_error(format!("Failed to create accumulator: {err}")))
}
};
match acc.update_batch(&v) {
Ok(_) => (),
- Err(err) => {
- return Err(vm.new_runtime_error(format!("Failed to update batch: {}", err)))
- }
+ Err(err) => return Err(vm.new_runtime_error(format!("Failed to update batch: {err}"))),
};
let res = match acc.evaluate() {
Ok(r) => r,
Err(err) => {
- return Err(vm.new_runtime_error(format!("Failed to evaluate accumulator: {}", err)))
+ return Err(vm.new_runtime_error(format!("Failed to evaluate accumulator: {err}")))
}
};
let res = val_to_pyobj(res, vm);
@@ -792,7 +788,7 @@ pub(crate) mod greptime_builtin {
ConstantVector::new(Arc::new(Int64Vector::from_vec(vec![pow])) as _, len_base);
Arc::new(ret) as _
} else {
- return Err(vm.new_type_error(format!("Unsupported type({:#?}) for pow()", pow)));
+ return Err(vm.new_type_error(format!("Unsupported type({pow:#?}) for pow()")));
};
// pyfunction can return PyResult<...>, args can be like PyObjectRef or anything
// impl IntoPyNativeFunc, see rustpython-vm function for more details
@@ -837,8 +833,7 @@ pub(crate) mod greptime_builtin {
let ret = cur.slice(0, 0);
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- ret, e
+ "Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
return Ok(ret.into());
@@ -850,8 +845,7 @@ pub(crate) mod greptime_builtin {
})?;
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- ret, e
+ "Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -864,8 +858,7 @@ pub(crate) mod greptime_builtin {
let ret = cur.slice(0, 0);
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- ret, e
+ "Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
return Ok(ret.into());
@@ -877,8 +870,7 @@ pub(crate) mod greptime_builtin {
})?;
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- ret, e
+ "Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -929,7 +921,7 @@ pub(crate) mod greptime_builtin {
.as_any()
.downcast_ref::<Int64Array>()
.ok_or_else(|| {
- vm.new_type_error(format!("ts must be int64, found: {:?}", ts_array_ref))
+ vm.new_type_error(format!("ts must be int64, found: {ts_array_ref:?}"))
})?;
let slices = {
let oldest = aggregate::min(ts)
@@ -975,7 +967,7 @@ pub(crate) mod greptime_builtin {
},
Err(err) => Err(vm
.new_runtime_error(
- format!("expect `interval()`'s `func` return a PyVector(`vector`) or int/float/bool, found return to be {:?}, error msg: {err}", obj)
+ format!("expect `interval()`'s `func` return a PyVector(`vector`) or int/float/bool, found return to be {obj:?}, error msg: {err}")
)
)
}
@@ -1019,8 +1011,7 @@ pub(crate) mod greptime_builtin {
};
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- ret, e
+ "Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -1036,8 +1027,7 @@ pub(crate) mod greptime_builtin {
};
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- ret, e
+ "Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index 16828ba8836f..d9ab067811ac 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -88,7 +88,7 @@ fn convert_scalar_to_py_obj_and_back() {
let col = try_into_columnar_value(list_obj, vm);
if let Err(err) = col {
let reason = format_py_error(err, vm);
- assert!(format!("{}", reason).contains(
+ assert!(format!("{reason}").contains(
"TypeError: All elements in a list should be same type to cast to Datafusion list!"
));
}
@@ -353,7 +353,7 @@ fn run_builtin_fn_testcases() {
},
Err(err) => {
if !err_res.contains(&err){
- panic!("Error message not containing, expect {err_res}, found {}", err)
+ panic!("Error message not containing, expect {err_res}, found {err}")
}
}
}
diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs
index 3dcc34856216..b9a77ef34043 100644
--- a/src/script/src/python/coprocessor.rs
+++ b/src/script/src/python/coprocessor.rs
@@ -185,9 +185,9 @@ fn try_into_columns(
col_len: usize,
) -> Result<Vec<VectorRef>> {
if is_instance::<PyTuple>(obj, vm) {
- let tuple = obj.payload::<PyTuple>().with_context(|| {
- ret_other_error_with(format!("can't cast obj {:?} to PyTuple)", obj))
- })?;
+ let tuple = obj
+ .payload::<PyTuple>()
+ .with_context(|| ret_other_error_with(format!("can't cast obj {obj:?} to PyTuple)")))?;
let cols = tuple
.iter()
.map(|obj| py_vec_obj_to_array(obj, vm, col_len))
@@ -206,7 +206,7 @@ fn select_from_rb(rb: &RecordBatch, fetch_names: &[String]) -> Result<Vec<PyVect
.iter()
.map(|name| {
let vector = rb.column_by_name(name).with_context(|| OtherSnafu {
- reason: format!("Can't find field name {}", name),
+ reason: format!("Can't find field name {name}"),
})?;
Ok(PyVector::from(vector.clone()))
})
@@ -227,7 +227,7 @@ fn check_args_anno_real_type(
ensure!(
anno_ty
.to_owned()
- .map(|v| v.datatype == None // like a vector[_]
+ .map(|v| v.datatype.is_none() // like a vector[_]
|| v.datatype == Some(real_ty.to_owned()) && v.is_nullable == is_nullable)
.unwrap_or(true),
OtherSnafu {
diff --git a/src/script/src/python/coprocessor/compile.rs b/src/script/src/python/coprocessor/compile.rs
index 8b8a10d228ee..ddb3f1a9146c 100644
--- a/src/script/src/python/coprocessor/compile.rs
+++ b/src/script/src/python/coprocessor/compile.rs
@@ -117,10 +117,7 @@ pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Re
// It's safe to unwrap loc, it is always exists.
stmts.push(gen_call(name, deco_args, &loc.unwrap()));
} else {
- return fail_parse_error!(
- format!("Expect statement in script, found: {:?}", top),
- None,
- );
+ return fail_parse_error!(format!("Expect statement in script, found: {top:?}"), None);
}
// use `compile::Mode::BlockExpr` so it return the result of statement
compile_top(
diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs
index 324b5f7fc837..b8280a8db11f 100644
--- a/src/script/src/python/coprocessor/parse.rs
+++ b/src/script/src/python/coprocessor/parse.rs
@@ -99,7 +99,7 @@ fn try_into_datatype(ty: &str, loc: &Location) -> Result<Option<DataType>> {
"_" => Ok(None),
// note the different between "_" and _
_ => fail_parse_error!(
- format!("Unknown datatype: {ty} at {:?}", loc),
+ format!("Unknown datatype: {ty} at {loc:?}"),
Some(loc.to_owned())
),
}
@@ -209,10 +209,7 @@ fn check_annotation_ret_slice(sub: &ast::Expr<()>) -> Result<&ast::Expr<()>> {
ensure!(
id == "vector",
ret_parse_error(
- format!(
- "Wrong type annotation, expect `vector[...]`, found `{}`",
- id
- ),
+ format!("Wrong type annotation, expect `vector[...]`, found `{id}`"),
Some(value.location)
)
);
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 6e20e86db004..9564e373f726 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -221,7 +221,7 @@ pub fn get_error_reason_loc(err: &Error) -> (String, Option<Location>) {
Error::PyRuntime { msg, .. } => (msg.clone(), None),
Error::PyParse { source, .. } => (source.error.to_string(), Some(source.location)),
Error::PyCompile { source, .. } => (source.error.to_string(), Some(source.location)),
- _ => (format!("Unknown error: {:?}", err), None),
+ _ => (format!("Unknown error: {err:?}"), None),
}
}
diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs
index 49b511c10137..901e8391ba6a 100644
--- a/src/script/src/python/test.rs
+++ b/src/script/src/python/test.rs
@@ -153,7 +153,7 @@ fn run_ron_testcases() {
} => {
let rb = create_sample_recordbatch();
let res = coprocessor::exec_coprocessor(&testcase.code, &rb);
- assert!(res.is_err(), "{:#?}\nExpect Err(...), actual Ok(...)", res);
+ assert!(res.is_err(), "{res:#?}\nExpect Err(...), actual Ok(...)");
if let Err(res) = res {
error!(
"{}",
diff --git a/src/script/src/python/utils.rs b/src/script/src/python/utils.rs
index 8f078c163cbf..56c73ccb3ef3 100644
--- a/src/script/src/python/utils.rs
+++ b/src/script/src/python/utils.rs
@@ -40,7 +40,7 @@ pub fn format_py_error(excep: PyBaseExceptionRef, vm: &VirtualMachine) -> error:
let mut msg = String::new();
if let Err(e) = vm.write_exception(&mut msg, &excep) {
return error::Error::PyRuntime {
- msg: format!("Failed to write exception msg, err: {}", e),
+ msg: format!("Failed to write exception msg, err: {e}"),
backtrace: Backtrace::generate(),
};
}
@@ -59,9 +59,9 @@ pub fn py_vec_obj_to_array(
) -> Result<VectorRef, error::Error> {
// It's ugly, but we can't find a better way right now.
if is_instance::<PyVector>(obj, vm) {
- let pyv = obj.payload::<PyVector>().with_context(|| {
- ret_other_error_with(format!("can't cast obj {:?} to PyVector", obj))
- })?;
+ let pyv = obj
+ .payload::<PyVector>()
+ .with_context(|| ret_other_error_with(format!("can't cast obj {obj:?} to PyVector")))?;
Ok(pyv.as_vector_ref())
} else if is_instance::<PyInt>(obj, vm) {
let val = obj
@@ -110,6 +110,6 @@ pub fn py_vec_obj_to_array(
_ => unreachable!(),
}
} else {
- ret_other_error_with(format!("Expect a vector or a constant, found {:?}", obj)).fail()
+ ret_other_error_with(format!("Expect a vector or a constant, found {obj:?}")).fail()
}
}
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index 47fae45ed13f..7ec61fbd4c22 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -59,28 +59,26 @@ fn emit_cast_error(
dst_ty: &ArrowDataType,
) -> PyBaseExceptionRef {
vm.new_type_error(format!(
- "Can't cast source operand of type {:?} into target type of {:?}",
- src_ty, dst_ty
+ "Can't cast source operand of type {src_ty:?} into target type of {dst_ty:?}",
))
}
/// Performs `val - arr`.
fn arrow_rsub(arr: &dyn Array, val: &dyn Array, vm: &VirtualMachine) -> PyResult<ArrayRef> {
- arithmetic::subtract_dyn(val, arr).map_err(|e| vm.new_type_error(format!("rsub error: {}", e)))
+ arithmetic::subtract_dyn(val, arr).map_err(|e| vm.new_type_error(format!("rsub error: {e}")))
}
/// Performs `val / arr`
fn arrow_rtruediv(arr: &dyn Array, val: &dyn Array, vm: &VirtualMachine) -> PyResult<ArrayRef> {
- arithmetic::divide_dyn(val, arr)
- .map_err(|e| vm.new_type_error(format!("rtruediv error: {}", e)))
+ arithmetic::divide_dyn(val, arr).map_err(|e| vm.new_type_error(format!("rtruediv error: {e}")))
}
/// Performs `val / arr`, but cast to i64.
fn arrow_rfloordiv(arr: &dyn Array, val: &dyn Array, vm: &VirtualMachine) -> PyResult<ArrayRef> {
let array = arithmetic::divide_dyn(val, arr)
- .map_err(|e| vm.new_type_error(format!("rtruediv divide error: {}", e)))?;
+ .map_err(|e| vm.new_type_error(format!("rtruediv divide error: {e}")))?;
compute::cast(&array, &ArrowDataType::Int64)
- .map_err(|e| vm.new_type_error(format!("rtruediv cast error: {}", e)))
+ .map_err(|e| vm.new_type_error(format!("rtruediv cast error: {e}")))
}
fn wrap_result<F>(f: F) -> impl Fn(&dyn Array, &dyn Array, &VirtualMachine) -> PyResult<ArrayRef>
@@ -88,7 +86,7 @@ where
F: Fn(&dyn Array, &dyn Array) -> ArrowResult<ArrayRef>,
{
move |left, right, vm| {
- f(left, right).map_err(|e| vm.new_type_error(format!("arithmetic error {}", e)))
+ f(left, right).map_err(|e| vm.new_type_error(format!("arithmetic error {e}")))
}
}
@@ -154,8 +152,7 @@ impl PyVector {
v
} else {
return Err(vm.new_type_error(format!(
- "Can't cast pyobject {:?} into concrete type {:?}",
- obj, datatype
+ "Can't cast pyobject {obj:?} into concrete type {datatype:?}",
)));
};
// Safety: `pyobj_try_to_typed_val()` has checked the data type.
@@ -262,8 +259,7 @@ impl PyVector {
Ok(Helper::try_into_vector(result.clone())
.map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- result, e
+ "Can't cast result into vector, result: {result:?}, err: {e:?}",
))
})?
.into())
@@ -305,13 +301,12 @@ impl PyVector {
let right = cast(right, &target_type, vm)?;
let result = op(left.as_ref(), right.as_ref())
- .map_err(|e| vm.new_type_error(format!("Can't compute op, error: {}", e)))?;
+ .map_err(|e| vm.new_type_error(format!("Can't compute op, error: {e}")))?;
Ok(Helper::try_into_vector(result.clone())
.map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- result, e
+ "Can't cast result into vector, result: {result:?}, err: {e:?}",
))
})?
.into())
@@ -549,8 +544,7 @@ impl PyVector {
res.map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?;
let ret = Helper::try_into_vector(res.clone()).map_err(|e| {
vm.new_type_error(format!(
- "Can't cast result into vector, result: {:?}, err: {:?}",
- res, e
+ "Can't cast result into vector, result: {res:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -580,7 +574,7 @@ impl PyVector {
let res = compute::filter(self.to_arrow_array().as_ref(), mask)
.map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?;
let ret = Helper::try_into_vector(res.clone()).map_err(|e| {
- vm.new_type_error(format!("Can't cast result into vector, err: {:?}", e))
+ vm.new_type_error(format!("Can't cast result into vector, err: {e:?}"))
})?;
Ok(Self::from(ret).into_pyobject(vm))
} else {
@@ -683,7 +677,7 @@ fn get_arrow_scalar_op(
move |a: &dyn Array, b: &dyn Array, vm| -> PyResult<ArrayRef> {
let array =
- op_bool_arr(a, b).map_err(|e| vm.new_type_error(format!("scalar op error: {}", e)))?;
+ op_bool_arr(a, b).map_err(|e| vm.new_type_error(format!("scalar op error: {e}")))?;
Ok(Arc::new(array))
}
}
@@ -932,7 +926,7 @@ fn get_concrete_type(obj: &PyObjectRef, vm: &VirtualMachine) -> PyResult<Concret
} else if is_instance::<PyStr>(obj, vm) {
Ok(ConcreteDataType::string_datatype())
} else {
- Err(vm.new_type_error(format!("Unsupported pyobject type: {:?}", obj)))
+ Err(vm.new_type_error(format!("Unsupported pyobject type: {obj:?}")))
}
}
@@ -1205,7 +1199,7 @@ pub mod tests {
}
}
} else {
- panic!("{code}: {:?}", result)
+ panic!("{code}: {result:?}")
}
}
}
diff --git a/src/servers/src/auth/user_provider.rs b/src/servers/src/auth/user_provider.rs
index f58dbb485302..d2888cc5e3bc 100644
--- a/src/servers/src/auth/user_provider.rs
+++ b/src/servers/src/auth/user_provider.rs
@@ -243,7 +243,7 @@ admin=654321",
assert!(lw.flush().is_ok());
}
- let param = format!("file:{}", file_path);
+ let param = format!("file:{file_path}");
let provider = StaticUserProvider::try_from(param.as_str()).unwrap();
test_auth(&provider, "root", "123456").await;
test_auth(&provider, "admin", "654321").await;
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index 8ee4ce88aeb3..a087f76aafb3 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -79,7 +79,7 @@ impl BatchHandler {
// 1. prevent the execution from being cancelled unexpected by tonic runtime.
// 2. avoid the handler blocks the gRPC runtime because `exec_admin_request` may block
// the caller thread.
- let _ = self.runtime.spawn(async move {
+ self.runtime.spawn(async move {
let result = future.await;
// Ignore send result. Usually an error indicates the rx is dropped (request timeouted).
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 263b8b119304..ccb28b0e1188 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -255,7 +255,7 @@ impl JsonResponse {
Err(e) => {
return Self::with_error(
- format!("Recordbatch error: {}", e),
+ format!("Recordbatch error: {e}"),
e.status_code(),
);
}
@@ -271,7 +271,7 @@ impl JsonResponse {
},
Err(e) => {
return Self::with_error(
- format!("Query engine output error: {}", e),
+ format!("Query engine output error: {e}"),
e.status_code(),
);
}
@@ -378,7 +378,7 @@ impl HttpServer {
..Info::default()
},
servers: vec![OpenAPIServer {
- url: format!("/{}", HTTP_API_VERSION),
+ url: format!("/{HTTP_API_VERSION}"),
..OpenAPIServer::default()
}],
..OpenApi::default()
@@ -392,25 +392,25 @@ impl HttpServer {
.finish_api(&mut api)
.layer(Extension(Arc::new(api)));
- let mut router = Router::new().nest(&format!("/{}", HTTP_API_VERSION), sql_router);
+ let mut router = Router::new().nest(&format!("/{HTTP_API_VERSION}"), sql_router);
if let Some(opentsdb_handler) = self.opentsdb_handler.clone() {
router = router.nest(
- &format!("/{}/opentsdb", HTTP_API_VERSION),
+ &format!("/{HTTP_API_VERSION}/opentsdb"),
self.route_opentsdb(opentsdb_handler),
);
}
if let Some(influxdb_handler) = self.influxdb_handler.clone() {
router = router.nest(
- &format!("/{}/influxdb", HTTP_API_VERSION),
+ &format!("/{HTTP_API_VERSION}/influxdb"),
self.route_influxdb(influxdb_handler),
);
}
if let Some(prom_handler) = self.prom_handler.clone() {
router = router.nest(
- &format!("/{}/prometheus", HTTP_API_VERSION),
+ &format!("/{HTTP_API_VERSION}/prometheus"),
self.route_prom(prom_handler),
);
}
@@ -513,7 +513,7 @@ impl Server for HttpServer {
/// handle error middleware
async fn handle_error(err: BoxError) -> Json<JsonResponse> {
Json(JsonResponse::with_error(
- format!("Unhandled internal error: {}", err),
+ format!("Unhandled internal error: {err}"),
StatusCode::Unexpected,
))
}
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 419b0f891bca..361a00dab2e2 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -52,13 +52,13 @@ pub async fn sql(
Ok(true) => query_ctx.set_current_schema(db),
Ok(false) => {
return Json(JsonResponse::with_error(
- format!("Database not found: {}", db),
+ format!("Database not found: {db}"),
StatusCode::DatabaseNotFound,
));
}
Err(e) => {
return Json(JsonResponse::with_error(
- format!("Error checking database: {}, {}", db, e),
+ format!("Error checking database: {db}, {e}"),
StatusCode::Internal,
));
}
diff --git a/src/servers/src/http/opentsdb.rs b/src/servers/src/http/opentsdb.rs
index d47e26b44108..62a20609909d 100644
--- a/src/servers/src/http/opentsdb.rs
+++ b/src/servers/src/http/opentsdb.rs
@@ -214,7 +214,7 @@ mod test {
assert_eq!(data_points.len(), 1);
assert_eq!(data_points[0], data_point1);
- let body = Body::from(format!("[{},{}]", raw_data_point1, raw_data_point2));
+ let body = Body::from(format!("[{raw_data_point1},{raw_data_point2}]"));
let data_points = parse_data_points(body).await.unwrap();
assert_eq!(data_points.len(), 2);
assert_eq!(data_points[0], data_point1);
diff --git a/src/servers/src/http/script.rs b/src/servers/src/http/script.rs
index 04d0571b8d48..3683f8aeb0ad 100644
--- a/src/servers/src/http/script.rs
+++ b/src/servers/src/http/script.rs
@@ -62,7 +62,7 @@ pub async fn scripts(
let body = match script_handler.insert_script(name.unwrap(), &script).await {
Ok(()) => JsonResponse::with_output(None),
- Err(e) => json_err!(format!("Insert script error: {}", e), e.status_code()),
+ Err(e) => json_err!(format!("Insert script error: {e}"), e.status_code()),
};
Json(body)
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 870f6918b8a9..93da4c2c07d6 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -81,10 +81,7 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertRequest> {
writer.commit();
}
- Ok(writers
- .into_iter()
- .map(|(_, writer)| writer.finish())
- .collect())
+ Ok(writers.into_values().map(|x| x.finish()).collect())
}
}
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index 1736ae67feca..d9e5d635af36 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -55,7 +55,7 @@ static SHOW_SQL_MODE_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'sql_mode'(.*))").unwrap());
static OTHER_NOT_SUPPORTED_STMT: Lazy<RegexSet> = Lazy::new(|| {
- RegexSet::new(&[
+ RegexSet::new([
// Txn.
"(?i)^(ROLLBACK(.*))",
"(?i)^(COMMIT(.*))",
diff --git a/src/servers/src/opentsdb.rs b/src/servers/src/opentsdb.rs
index dbe0fb601a12..a50c87907ecf 100644
--- a/src/servers/src/opentsdb.rs
+++ b/src/servers/src/opentsdb.rs
@@ -84,7 +84,7 @@ impl OpentsdbServer {
let connection = Connection::new(stream);
let mut handler = Handler::new(query_handler, connection, shutdown);
- let _ = io_runtime.spawn(async move {
+ io_runtime.spawn(async move {
if let Err(e) = handler.run().await {
error!(e; "Unexpected error when handling OpenTSDB connection");
}
diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs
index 49fccc48489a..6dc61f6784cc 100644
--- a/src/servers/src/opentsdb/codec.rs
+++ b/src/servers/src/opentsdb/codec.rs
@@ -48,7 +48,7 @@ impl DataPoint {
// OpenTSDB command is case sensitive, verified in real OpenTSDB.
if cmd != "put" {
return error::InvalidQuerySnafu {
- reason: format!("unknown command {}.", cmd),
+ reason: format!("unknown command {cmd}."),
}
.fail();
}
@@ -89,7 +89,7 @@ impl DataPoint {
let tag = token.split('=').collect::<Vec<&str>>();
if tag.len() != 2 || tag[0].is_empty() || tag[1].is_empty() {
return error::InvalidQuerySnafu {
- reason: format!("put: invalid tag: {}", token),
+ reason: format!("put: invalid tag: {token}"),
}
.fail();
}
@@ -97,7 +97,7 @@ impl DataPoint {
let tagv = tag[1].to_string();
if tags.iter().any(|(t, _)| t == &tagk) {
return error::InvalidQuerySnafu {
- reason: format!("put: illegal argument: duplicate tag: {}", tagk),
+ reason: format!("put: illegal argument: duplicate tag: {tagk}"),
}
.fail();
}
diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs
index 22ea5798baf8..4fd7aad9e263 100644
--- a/src/servers/src/postgres/auth_handler.rs
+++ b/src/servers/src/postgres/auth_handler.rs
@@ -162,7 +162,7 @@ impl StartupHandler for PgAuthStartupHandler {
client,
"FATAL",
"3D000",
- format!("Database not found: {}", db),
+ format!("Database not found: {db}"),
)
.await?;
return Ok(());
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index 6fa4da6a167a..e7f68f1fc046 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -193,7 +193,7 @@ fn type_translate(origin: &ConcreteDataType) -> Result<Type> {
&ConcreteDataType::DateTime(_) => Ok(Type::TIMESTAMP),
&ConcreteDataType::Timestamp(_) => Ok(Type::TIMESTAMP),
&ConcreteDataType::List(_) => error::InternalSnafu {
- err_msg: format!("not implemented for column datatype {:?}", origin),
+ err_msg: format!("not implemented for column datatype {origin:?}"),
}
.fail(),
}
@@ -336,7 +336,7 @@ mod test {
let err = encode_value(
&Value::List(ListValue::new(
- Some(Box::new(vec![])),
+ Some(Box::default()),
ConcreteDataType::int8_datatype(),
)),
&mut builder,
@@ -344,7 +344,7 @@ mod test {
.unwrap_err();
match err {
PgWireError::ApiError(e) => {
- assert!(format!("{}", e).contains("Internal error:"));
+ assert!(format!("{e}").contains("Internal error:"));
}
_ => {
unreachable!()
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 80d9db0b743a..b252b1cfa489 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -63,8 +63,7 @@ pub fn query_to_sql(db: &str, q: &Query) -> Result<(String, String)> {
let mut conditions: Vec<String> = Vec::with_capacity(label_matches.len());
conditions.push(format!(
- "{}>={} AND {}<={}",
- TIMESTAMP_COLUMN_NAME, start_timestamp_ms, TIMESTAMP_COLUMN_NAME, end_timestamp_ms,
+ "{TIMESTAMP_COLUMN_NAME}>={start_timestamp_ms} AND {TIMESTAMP_COLUMN_NAME}<={end_timestamp_ms}",
));
for m in label_matches {
@@ -82,18 +81,18 @@ pub fn query_to_sql(db: &str, q: &Query) -> Result<(String, String)> {
match m_type {
MatcherType::Eq => {
- conditions.push(format!("{}='{}'", name, value));
+ conditions.push(format!("{name}='{value}'"));
}
MatcherType::Neq => {
- conditions.push(format!("{}!='{}'", name, value));
+ conditions.push(format!("{name}!='{value}'"));
}
// Case sensitive regexp match
MatcherType::Re => {
- conditions.push(format!("{}~'{}'", name, value));
+ conditions.push(format!("{name}~'{value}'"));
}
// Case sensitive regexp not match
MatcherType::Nre => {
- conditions.push(format!("{}!~'{}'", name, value));
+ conditions.push(format!("{name}!~'{value}'"));
}
}
}
@@ -103,8 +102,7 @@ pub fn query_to_sql(db: &str, q: &Query) -> Result<(String, String)> {
Ok((
table_name.to_string(),
format!(
- "select * from {}.{} where {} order by {}",
- db, table_name, conditions, TIMESTAMP_COLUMN_NAME,
+ "select * from {db}.{table_name} where {conditions} order by {TIMESTAMP_COLUMN_NAME}",
),
))
}
diff --git a/src/servers/src/server.rs b/src/servers/src/server.rs
index 70e020fcd0d7..bce62845f923 100644
--- a/src/servers/src/server.rs
+++ b/src/servers/src/server.rs
@@ -19,7 +19,7 @@ use async_trait::async_trait;
use common_runtime::Runtime;
use common_telemetry::logging::{error, info};
use futures::future::{AbortHandle, AbortRegistration, Abortable};
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::TcpListenerStream;
@@ -64,12 +64,12 @@ impl AcceptTask {
name, error
);
} else {
- info!("{} server is shutdown.", name);
+ info!("{name} server is shutdown.");
}
Ok(())
}
None => error::InternalSnafu {
- err_msg: format!("{} server is not started.", name),
+ err_msg: format!("{name} server is not started."),
}
.fail()?,
}
@@ -86,32 +86,31 @@ impl AcceptTask {
tokio::net::TcpListener::bind(addr)
.await
.context(error::TokioIoSnafu {
- err_msg: format!("{} failed to bind addr {}", name, addr),
+ err_msg: format!("{name} failed to bind addr {addr}"),
})?;
// get actually bond addr in case input addr use port 0
let addr = listener.local_addr()?;
- info!("{} server started at {}", name, addr);
+ info!("{name} server started at {addr}");
let stream = TcpListenerStream::new(listener);
let stream = Abortable::new(stream, registration);
Ok((stream, addr))
}
None => error::InternalSnafu {
- err_msg: format!("{} server has been started.", name),
+ err_msg: format!("{name} server has been started."),
}
.fail()?,
}
}
fn start_with(&mut self, join_handle: JoinHandle<()>, name: &str) -> Result<()> {
- if self.join_handle.is_some() {
- return error::InternalSnafu {
- err_msg: format!("{} server has been started.", name),
+ ensure!(
+ self.join_handle.is_none(),
+ error::InternalSnafu {
+ err_msg: format!("{name} server has been started."),
}
- .fail();
- }
- let _ = self.join_handle.insert(join_handle);
-
+ );
+ self.join_handle.get_or_insert(join_handle);
Ok(())
}
}
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 17a3e8235e34..b15d3844cd6d 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -60,7 +60,7 @@ async fn test_sql_output_rows() {
axum::Extension(UserInfo::default()),
)
.await;
- assert!(json.success(), "{:?}", json);
+ assert!(json.success(), "{json:?}");
assert!(json.error().is_none());
match &json.output().expect("assertion failed")[0] {
JsonOutput::Records(records) => {
@@ -103,7 +103,7 @@ def test(n):
body,
)
.await;
- assert!(!json.success(), "{:?}", json);
+ assert!(!json.success(), "{json:?}");
assert_eq!(json.error().unwrap(), "Invalid argument: invalid name");
let body = RawBody(Body::from(script));
@@ -117,7 +117,7 @@ def test(n):
body,
)
.await;
- assert!(json.success(), "{:?}", json);
+ assert!(json.success(), "{json:?}");
assert!(json.error().is_none());
assert!(json.output().is_none());
}
diff --git a/src/servers/tests/http/opentsdb_test.rs b/src/servers/tests/http/opentsdb_test.rs
index ffb76d8f8921..227093e56328 100644
--- a/src/servers/tests/http/opentsdb_test.rs
+++ b/src/servers/tests/http/opentsdb_test.rs
@@ -202,13 +202,12 @@ async fn test_opentsdb_debug_put() {
fn create_data_point(metric: &str) -> String {
format!(
r#"{{
- "metric": "{}",
+ "metric": "{metric}",
"timestamp": 1000,
"value": 1,
"tags": {{
"host": "web01"
}}
}}"#,
- metric
)
}
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index 2d1aac91a98b..eca4f05b2d6e 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -266,8 +266,7 @@ async fn test_query_concurrently() -> Result<()> {
let expected: u32 = rand.gen_range(0..100);
let result: u32 = connection
.query_first(format!(
- "SELECT uint32s FROM numbers WHERE uint32s = {}",
- expected
+ "SELECT uint32s FROM numbers WHERE uint32s = {expected}"
))
.await
.unwrap()
diff --git a/src/servers/tests/opentsdb.rs b/src/servers/tests/opentsdb.rs
index 12c08ec89ab7..e9611703cd5c 100644
--- a/src/servers/tests/opentsdb.rs
+++ b/src/servers/tests/opentsdb.rs
@@ -105,7 +105,7 @@ async fn test_shutdown_opentsdb_server_concurrently() -> Result<()> {
match stream {
Ok(stream) => {
let mut connection = Connection::new(stream);
- let result = connection.write_line(format!("put {} 1 1", i)).await;
+ let result = connection.write_line(format!("put {i} 1 1")).await;
i += 1;
if i > 4 {
@@ -116,7 +116,7 @@ async fn test_shutdown_opentsdb_server_concurrently() -> Result<()> {
if let Err(e) = result {
match e {
Error::InternalIo { .. } => return,
- _ => panic!("Not IO error, err is {}", e),
+ _ => panic!("Not IO error, err is {e}"),
}
}
@@ -170,13 +170,13 @@ async fn test_opentsdb_connection_shutdown() -> Result<()> {
let mut i = 2;
loop {
// The connection may not be unwritable after shutdown immediately.
- let result = connection.write_line(format!("put {} 1 1", i)).await;
+ let result = connection.write_line(format!("put {i} 1 1")).await;
i += 1;
if result.is_err() {
if let Err(e) = result {
match e {
Error::InternalIo { .. } => break,
- _ => panic!("Not IO error, err is {}", e),
+ _ => panic!("Not IO error, err is {e}"),
}
}
}
@@ -250,10 +250,7 @@ async fn test_query_concurrently() -> Result<()> {
let stream = TcpStream::connect(addr).await.unwrap();
let mut connection = Connection::new(stream);
for i in 0..expect_executed_queries_per_worker {
- connection
- .write_line(format!("put {} 1 1", i))
- .await
- .unwrap();
+ connection.write_line(format!("put {i} 1 1")).await.unwrap();
let should_recreate_conn = rand.gen_range(0..100) == 1;
if should_recreate_conn {
diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs
index 556fb52875ef..a6b4ab977a78 100644
--- a/src/servers/tests/postgres/mod.rs
+++ b/src/servers/tests/postgres/mod.rs
@@ -162,8 +162,7 @@ async fn test_query_pg_concurrently() -> Result<()> {
let result: u32 = unwrap_results(
client
.simple_query(&format!(
- "SELECT uint32s FROM numbers WHERE uint32s = {}",
- expected
+ "SELECT uint32s FROM numbers WHERE uint32s = {expected}"
))
.await
.unwrap()
@@ -284,14 +283,10 @@ async fn create_secure_connection(
) -> std::result::Result<Client, PgError> {
let url = if with_pwd {
format!(
- "sslmode=require host=127.0.0.1 port={} user=test_user password=test_pwd connect_timeout=2, dbname={}",
- port, DEFAULT_SCHEMA_NAME
+ "sslmode=require host=127.0.0.1 port={port} user=test_user password=test_pwd connect_timeout=2, dbname={DEFAULT_SCHEMA_NAME}",
)
} else {
- format!(
- "host=127.0.0.1 port={} connect_timeout=2 dbname={}",
- port, DEFAULT_SCHEMA_NAME
- )
+ format!("host=127.0.0.1 port={port} connect_timeout=2 dbname={DEFAULT_SCHEMA_NAME}")
};
let mut config = rustls::ClientConfig::builder()
@@ -315,14 +310,10 @@ async fn create_plain_connection(
) -> std::result::Result<Client, PgError> {
let url = if with_pwd {
format!(
- "host=127.0.0.1 port={} user=test_user password=test_pwd connect_timeout=2 dbname={}",
- port, DEFAULT_SCHEMA_NAME
+ "host=127.0.0.1 port={port} user=test_user password=test_pwd connect_timeout=2 dbname={DEFAULT_SCHEMA_NAME}",
)
} else {
- format!(
- "host=127.0.0.1 port={} connect_timeout=2 dbname={}",
- port, DEFAULT_SCHEMA_NAME
- )
+ format!("host=127.0.0.1 port={port} connect_timeout=2 dbname={DEFAULT_SCHEMA_NAME}")
};
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
tokio::spawn(conn);
@@ -333,17 +324,14 @@ async fn create_connection_with_given_db(
port: u16,
db: &str,
) -> std::result::Result<Client, PgError> {
- let url = format!(
- "host=127.0.0.1 port={} connect_timeout=2 dbname={}",
- port, db
- );
+ let url = format!("host=127.0.0.1 port={port} connect_timeout=2 dbname={db}");
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
tokio::spawn(conn);
Ok(client)
}
async fn create_connection_without_db(port: u16) -> std::result::Result<Client, PgError> {
- let url = format!("host=127.0.0.1 port={} connect_timeout=2", port);
+ let url = format!("host=127.0.0.1 port={port} connect_timeout=2");
let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
tokio::spawn(conn);
Ok(client)
@@ -351,7 +339,7 @@ async fn create_connection_without_db(port: u16) -> std::result::Result<Client,
fn resolve_result(resp: &SimpleQueryMessage, col_index: usize) -> Option<&str> {
match resp {
- &SimpleQueryMessage::Row(ref r) => r.get(col_index),
+ SimpleQueryMessage::Row(r) => r.get(col_index),
_ => None,
}
}
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 3a14fb066619..b2c4513a652b 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -322,8 +322,7 @@ impl<'a> ParserContext<'a> {
// Report unexpected token
pub(crate) fn expected<T>(&self, expected: &str, found: Token) -> Result<T> {
Err(ParserError::ParserError(format!(
- "Expected {}, found: {}",
- expected, found
+ "Expected {expected}, found: {found}",
)))
.context(SyntaxSnafu { sql: self.sql })
}
diff --git a/src/sql/src/parsers/insert_parser.rs b/src/sql/src/parsers/insert_parser.rs
index 43709407ad44..0f8c0b0aa7d6 100644
--- a/src/sql/src/parsers/insert_parser.rs
+++ b/src/sql/src/parsers/insert_parser.rs
@@ -65,6 +65,6 @@ mod tests {
pub fn test_parse_invalid_insert() {
let sql = r"INSERT INTO table_1 VALUES ("; // intentionally a bad sql
let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
- assert!(result.is_err(), "result is: {:?}", result);
+ assert!(result.is_err(), "result is: {result:?}");
}
}
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index 24b152781962..afcd0eb1bc76 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -65,8 +65,7 @@ pub fn table_idents_to_full_name(obj_name: &ObjectName) -> Result<(String, Strin
)),
_ => error::InvalidSqlSnafu {
msg: format!(
- "expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {}",
- obj_name
+ "expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {obj_name}",
),
}
.fail(),
@@ -94,7 +93,7 @@ fn parse_string_to_value(
Ok(Value::Date(date))
} else {
ParseSqlValueSnafu {
- msg: format!("Failed to parse {} to Date value", s),
+ msg: format!("Failed to parse {s} to Date value"),
}
.fail()
}
@@ -104,7 +103,7 @@ fn parse_string_to_value(
Ok(Value::DateTime(datetime))
} else {
ParseSqlValueSnafu {
- msg: format!("Failed to parse {} to DateTime value", s),
+ msg: format!("Failed to parse {s} to DateTime value"),
}
.fail()
}
@@ -117,7 +116,7 @@ fn parse_string_to_value(
)))
} else {
ParseSqlValueSnafu {
- msg: format!("Failed to parse {} to Timestamp value", s),
+ msg: format!("Failed to parse {s} to Timestamp value"),
}
.fail()
}
@@ -172,7 +171,7 @@ where
match n.parse::<R>() {
Ok(n) => Ok(n),
Err(e) => ParseSqlValueSnafu {
- msg: format!("Fail to parse number {}, {:?}", n, e),
+ msg: format!("Fail to parse number {n}, {e:?}"),
}
.fail(),
}
@@ -220,7 +219,7 @@ fn parse_column_default_constraint(
}
ColumnOption::Default(Expr::Function(func)) => {
// Always use lowercase for function expression
- ColumnDefaultConstraint::Function(format!("{}", func).to_lowercase())
+ ColumnDefaultConstraint::Function(format!("{func}").to_lowercase())
}
ColumnOption::Default(expr) => {
return UnsupportedDefaultValueSnafu {
@@ -391,7 +390,7 @@ mod tests {
assert_eq!(Value::Int32(999), v);
let v = sql_number_to_value(&ConcreteDataType::string_datatype(), "999");
- assert!(v.is_err(), "parse value error is: {:?}", v);
+ assert!(v.is_err(), "parse value error is: {v:?}");
}
#[test]
@@ -417,18 +416,17 @@ mod tests {
let sql_val = SqlValue::Number("3.0".to_string(), false);
let v = sql_value_to_value("a", &ConcreteDataType::boolean_datatype(), &sql_val);
assert!(v.is_err());
- assert!(format!("{:?}", v)
+ assert!(format!("{v:?}")
.contains("Fail to parse number 3.0, invalid column type: Boolean(BooleanType)"));
let sql_val = SqlValue::Boolean(true);
let v = sql_value_to_value("a", &ConcreteDataType::float64_datatype(), &sql_val);
assert!(v.is_err());
assert!(
- format!("{:?}", v).contains(
+ format!("{v:?}").contains(
"column_name: \"a\", expect: Float64(Float64Type), actual: Boolean(BooleanType)"
),
- "v is {:?}",
- v
+ "v is {v:?}",
);
}
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index f105648ea826..fe2ad373e2e0 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -72,20 +72,20 @@ fn sql_exprs_to_values(exprs: &Vec<Vec<Expr>>) -> Result<Vec<Vec<Value>>> {
{
if let Expr::Value(Value::Number(s, b)) = &**expr {
match op {
- UnaryOperator::Minus => Value::Number(format!("-{}", s), *b),
+ UnaryOperator::Minus => Value::Number(format!("-{s}"), *b),
UnaryOperator::Plus => Value::Number(s.to_string(), *b),
_ => unreachable!(),
}
} else {
return error::ParseSqlValueSnafu {
- msg: format!("{:?}", expr),
+ msg: format!("{expr:?}"),
}
.fail();
}
}
_ => {
return error::ParseSqlValueSnafu {
- msg: format!("{:?}", expr),
+ msg: format!("{expr:?}"),
}
.fail()
}
@@ -103,8 +103,7 @@ impl TryFrom<Statement> for Insert {
match value {
Statement::Insert { .. } => Ok(Insert { inner: value }),
unexp => Err(ParserError::ParserError(format!(
- "Not expected to be {}",
- unexp
+ "Not expected to be {unexp}"
))),
}
}
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index 29b6d38baf70..a137c3e7b144 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -28,8 +28,8 @@ impl fmt::Display for ShowKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ShowKind::All => write!(f, "ALL"),
- ShowKind::Like(ident) => write!(f, "LIKE {}", ident),
- ShowKind::Where(expr) => write!(f, "WHERE {}", expr),
+ ShowKind::Like(ident) => write!(f, "LIKE {ident}"),
+ ShowKind::Where(expr) => write!(f, "WHERE {expr}"),
}
}
}
diff --git a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
index d9757b847102..ec3f41774166 100644
--- a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
+++ b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
@@ -126,21 +126,20 @@ fn bench_memtable_read_write_ratio(c: &mut Criterion) {
let read_num = READ_NUM.load(Ordering::Relaxed);
let read_time = READ_SECS.load(Ordering::Relaxed);
let read_tps = if read_time != 0.0 {
- read_num as f64 / read_time as f64
+ read_num as f64 / read_time
} else {
0.0
};
let write_num = WRITE_NUM.load(Ordering::Relaxed);
let write_time = WRITE_SECS.load(Ordering::Relaxed);
let write_tps = if write_time != 0.0 {
- write_num as f64 / write_time as f64
+ write_num as f64 / write_time
} else {
0.0
};
if read_num != 0 || write_num != 0 {
println!(
- "\nread numbers: {}, read thrpt: {}\nwrite numbers: {}, write thrpt {}\n",
- read_num, read_tps, write_num, write_tps
+ "\nread numbers: {read_num}, read thrpt: {read_tps}\nwrite numbers: {write_num}, write thrpt {write_tps}\n",
);
}
}
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index ac1588458e74..ca31701342b8 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -95,14 +95,14 @@ impl<S: LogStore> EngineImpl<S> {
/// parent_dir is resolved in function `region_store_config` to ensure it's ended with '/'.
#[inline]
pub fn region_sst_dir(parent_dir: &str, region_name: &str) -> String {
- format!("{}{}/", parent_dir, region_name)
+ format!("{parent_dir}{region_name}/")
}
/// Generate region manifest path,
/// parent_dir is resolved in function `region_store_config` to ensure it's ended with '/'.
#[inline]
pub fn region_manifest_dir(parent_dir: &str, region_name: &str) -> String {
- format!("{}{}/manifest/", parent_dir, region_name)
+ format!("{parent_dir}{region_name}/manifest/")
}
/// A slot for region in the engine.
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index 51c8f0056526..bc8fa292af40 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -263,8 +263,7 @@ mod tests {
let regex = Regex::new(r"^[a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}.parquet$").unwrap();
assert!(
regex.is_match(&file_name),
- "illegal sst file name: {}",
- file_name
+ "Illegal sst file name: {file_name}",
);
}
}
diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs
index 0c41d5b6785b..8e58817490bb 100644
--- a/src/storage/src/lib.rs
+++ b/src/storage/src/lib.rs
@@ -13,7 +13,7 @@
// limitations under the License.
//! Storage engine implementation.
-#![feature(map_first_last)]
+
mod background;
mod chunk;
pub mod codec;
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index 744f97a6eb21..8e217e95ced1 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -38,12 +38,12 @@ const LAST_CHECKPOINT_FILE: &str = "_last_checkpoint";
#[inline]
pub fn delta_file(version: ManifestVersion) -> String {
- format!("{:020}.json", version)
+ format!("{version:020}.json")
}
#[inline]
pub fn checkpoint_file(version: ManifestVersion) -> String {
- format!("{:020}.checkpoint", version)
+ format!("{version:020}.checkpoint")
}
/// Return's the delta file version from path
@@ -54,7 +54,7 @@ pub fn checkpoint_file(version: ManifestVersion) -> String {
pub fn delta_version(path: &str) -> ManifestVersion {
let s = path.split('.').next().unwrap();
s.parse()
- .unwrap_or_else(|_| panic!("Invalid delta file: {}", path))
+ .unwrap_or_else(|_| panic!("Invalid delta file: {path}"))
}
#[inline]
@@ -298,7 +298,7 @@ mod tests {
for v in 0..5 {
log_store
- .save(v, format!("hello, {}", v).as_bytes())
+ .save(v, format!("hello, {v}").as_bytes())
.await
.unwrap();
}
@@ -307,7 +307,7 @@ mod tests {
for v in 1..4 {
let (version, bytes) = it.next_log().await.unwrap().unwrap();
assert_eq!(v, version);
- assert_eq!(format!("hello, {}", v).as_bytes(), bytes);
+ assert_eq!(format!("hello, {v}").as_bytes(), bytes);
}
assert!(it.next_log().await.unwrap().is_none());
@@ -315,7 +315,7 @@ mod tests {
for v in 0..5 {
let (version, bytes) = it.next_log().await.unwrap().unwrap();
assert_eq!(v, version);
- assert_eq!(format!("hello, {}", v).as_bytes(), bytes);
+ assert_eq!(format!("hello, {v}").as_bytes(), bytes);
}
assert!(it.next_log().await.unwrap().is_none());
@@ -327,7 +327,7 @@ mod tests {
for v in 3..5 {
let (version, bytes) = it.next_log().await.unwrap().unwrap();
assert_eq!(v, version);
- assert_eq!(format!("hello, {}", v).as_bytes(), bytes);
+ assert_eq!(format!("hello, {v}").as_bytes(), bytes);
}
assert!(it.next_log().await.unwrap().is_none());
diff --git a/src/storage/src/metadata.rs b/src/storage/src/metadata.rs
index 02fc437509fc..af31388a2275 100644
--- a/src/storage/src/metadata.rs
+++ b/src/storage/src/metadata.rs
@@ -450,7 +450,7 @@ where
{
if let Some(value) = metadata.get(key) {
return value.parse().with_context(|_| ParseMetaIntSnafu {
- key_value: format!("{}={}", key, value),
+ key_value: format!("{key}={value}"),
});
}
// No such key in metadata.
diff --git a/src/storage/src/read/merge.rs b/src/storage/src/read/merge.rs
index b4f76b1f4197..ab973e9fbc53 100644
--- a/src/storage/src/read/merge.rs
+++ b/src/storage/src/read/merge.rs
@@ -644,10 +644,10 @@ mod tests {
assert_eq!(left, right);
// Check Debug is implemented.
- let output = format!("{:?}", left);
+ let output = format!("{left:?}");
assert!(output.contains("cursor"));
assert!(output.contains("pos: 1"));
- let output = format!("{:?}", right);
+ let output = format!("{right:?}");
assert!(output.contains("cursor"));
let output = format!("{:?}", left.first_row());
assert!(output.contains("pos: 1"));
diff --git a/src/storage/src/schema/compat.rs b/src/storage/src/schema/compat.rs
index d8d5f9a08a0f..ae3f6e4ce139 100644
--- a/src/storage/src/schema/compat.rs
+++ b/src/storage/src/schema/compat.rs
@@ -567,8 +567,7 @@ mod tests {
let err = is_source_column_compatible(&source, &dest).unwrap_err();
assert!(
matches!(err, Error::CompatRead { .. }),
- "{:?} is not CompatRead",
- err
+ "{err:?} is not CompatRead",
);
}
@@ -606,8 +605,7 @@ mod tests {
let err = is_source_column_compatible(&source, &dest).unwrap_err();
assert!(
matches!(err, Error::CompatRead { .. }),
- "{:?} is not CompatRead",
- err
+ "{err:?} is not CompatRead",
);
}
}
diff --git a/src/storage/src/schema/store.rs b/src/storage/src/schema/store.rs
index 691320e8bd53..e20a5c177059 100644
--- a/src/storage/src/schema/store.rs
+++ b/src/storage/src/schema/store.rs
@@ -236,7 +236,7 @@ fn parse_index_from_metadata(metadata: &HashMap<String, String>, key: &str) -> R
.get(key)
.context(metadata::MetaNotFoundSnafu { key })?;
value.parse().with_context(|_| metadata::ParseMetaIntSnafu {
- key_value: format!("{}={}", key, value),
+ key_value: format!("{key}={value}"),
})
}
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 451c47818839..41d994a5b870 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -28,7 +28,7 @@ use crate::region::StoreConfig;
use crate::sst::FsAccessLayer;
fn log_store_dir(store_dir: &str) -> String {
- format!("{}/logstore", store_dir)
+ format!("{store_dir}/logstore")
}
/// Create a new StoreConfig for test.
diff --git a/src/storage/src/test_util/descriptor_util.rs b/src/storage/src/test_util/descriptor_util.rs
index 10d682745b06..21e5a966f72e 100644
--- a/src/storage/src/test_util/descriptor_util.rs
+++ b/src/storage/src/test_util/descriptor_util.rs
@@ -129,7 +129,7 @@ pub fn desc_with_value_columns(region_name: &str, num_value_columns: usize) -> R
let mut builder =
RegionDescBuilder::new(region_name).push_key_column(("k0", LogicalTypeId::Int64, false));
for i in 0..num_value_columns {
- let name = format!("v{}", i);
+ let name = format!("v{i}");
builder = builder.push_value_column((&name, LogicalTypeId::Int64, true));
}
builder.build()
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index 25c51fef9b08..eaaeef6fe721 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -215,7 +215,7 @@ impl WriteRequest for WriteBatch {
Mutation::Put(put_data) => {
let column = put_data
.column_by_name(ts_col_name)
- .unwrap_or_else(|| panic!("Cannot find column by name: {}", ts_col_name));
+ .unwrap_or_else(|| panic!("Cannot find column by name: {ts_col_name}"));
if column.is_const() {
let ts = match column.get(0) {
Value::Timestamp(ts) => ts,
diff --git a/src/storage/src/write_batch/codec.rs b/src/storage/src/write_batch/codec.rs
index 56bc387bb277..15dad0ada2a4 100644
--- a/src/storage/src/write_batch/codec.rs
+++ b/src/storage/src/write_batch/codec.rs
@@ -149,7 +149,7 @@ impl Decoder for WriteBatchArrowDecoder {
}
_ => {
return DataCorruptedSnafu {
- message: format!("Unexpceted mutation type: {}", mutation_type),
+ message: format!("Unexpected mutation type: {mutation_type}"),
}
.fail()
}
diff --git a/src/storage/src/write_batch/compat.rs b/src/storage/src/write_batch/compat.rs
index ce45ffc1db4c..b7d6758bb7a4 100644
--- a/src/storage/src/write_batch/compat.rs
+++ b/src/storage/src/write_batch/compat.rs
@@ -188,8 +188,7 @@ mod tests {
let err = batch.compat_write(&schema_old).unwrap_err();
assert!(
matches!(err, Error::WriteToOldVersion { .. }),
- "err {} is not WriteToOldVersion",
- err
+ "err {err} is not WriteToOldVersion",
);
}
@@ -209,8 +208,7 @@ mod tests {
let err = batch.compat_write(&schema_no_column).unwrap_err();
assert!(
matches!(err, Error::NotInSchemaToCompat { .. }),
- "err {} is not NotInSchemaToCompat",
- err
+ "err {err} is not NotInSchemaToCompat",
);
}
}
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
index 55f68c31cf56..23e2e4e7ba4b 100644
--- a/src/table/src/engine.rs
+++ b/src/table/src/engine.rs
@@ -86,14 +86,14 @@ pub trait TableEngine: Send + Sync {
) -> Result<TableRef>;
/// Returns the table by it's name.
- fn get_table<'a>(
+ fn get_table(
&self,
ctx: &EngineContext,
- table_ref: &'a TableReference,
+ table_ref: &TableReference,
) -> Result<Option<TableRef>>;
/// Returns true when the given table is exists.
- fn table_exists<'a>(&self, ctx: &EngineContext, table_ref: &'a TableReference) -> bool;
+ fn table_exists(&self, ctx: &EngineContext, table_ref: &TableReference) -> bool;
/// Drops the given table. Return true if the table is dropped, or false if the table doesn't exist.
async fn drop_table(&self, ctx: &EngineContext, request: DropTableRequest) -> Result<bool>;
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index e481acbc7ec9..3ae85784dc33 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -211,10 +211,7 @@ impl TableMeta {
let mut builder = SchemaBuilder::try_from(columns)
.with_context(|_| error::SchemaBuildSnafu {
- msg: format!(
- "Failed to convert column schemas into schema for table {}",
- table_name
- ),
+ msg: format!("Failed to convert column schemas into schema for table {table_name}"),
})?
// Also bump the schema version.
.version(table_schema.version() + 1);
@@ -222,10 +219,7 @@ impl TableMeta {
builder = builder.add_metadata(k, v);
}
let new_schema = builder.build().with_context(|_| error::SchemaBuildSnafu {
- msg: format!(
- "Table {} cannot add new columns {:?}",
- table_name, column_names
- ),
+ msg: format!("Table {table_name} cannot add new columns {column_names:?}"),
})?;
// value_indices would be generated automatically.
@@ -288,10 +282,7 @@ impl TableMeta {
let mut builder = SchemaBuilder::try_from_columns(columns)
.with_context(|_| error::SchemaBuildSnafu {
- msg: format!(
- "Failed to convert column schemas into schema for table {}",
- table_name
- ),
+ msg: format!("Failed to convert column schemas into schema for table {table_name}"),
})?
// Also bump the schema version.
.version(table_schema.version() + 1);
@@ -299,10 +290,7 @@ impl TableMeta {
builder = builder.add_metadata(k, v);
}
let new_schema = builder.build().with_context(|_| error::SchemaBuildSnafu {
- msg: format!(
- "Table {} cannot add remove columns {:?}",
- table_name, column_names
- ),
+ msg: format!("Table {table_name} cannot add remove columns {column_names:?}"),
})?;
// Rebuild the indices of primary key columns.
diff --git a/src/table/src/table/numbers.rs b/src/table/src/table/numbers.rs
index 473d80dd0ab7..455199023ac3 100644
--- a/src/table/src/table/numbers.rs
+++ b/src/table/src/table/numbers.rs
@@ -136,7 +136,7 @@ impl Stream for NumbersStream {
let numbers: Vec<u32> = (0..self.limit).collect();
let batch = DfRecordBatch::try_new(
self.schema.arrow_schema().clone(),
- vec![Arc::new(UInt32Array::from_slice(&numbers))],
+ vec![Arc::new(UInt32Array::from_slice(numbers))],
)
.unwrap();
diff --git a/src/table/src/test_util/mock_engine.rs b/src/table/src/test_util/mock_engine.rs
index 2b19b1889a64..916b46060574 100644
--- a/src/table/src/test_util/mock_engine.rs
+++ b/src/table/src/test_util/mock_engine.rs
@@ -85,15 +85,11 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
- fn get_table<'a>(
- &self,
- _ctx: &EngineContext,
- _ref: &'a TableReference,
- ) -> Result<Option<TableRef>> {
+ fn get_table(&self, _ctx: &EngineContext, _ref: &TableReference) -> Result<Option<TableRef>> {
unimplemented!()
}
- fn table_exists<'a>(&self, _ctx: &EngineContext, _name: &'a TableReference) -> bool {
+ fn table_exists(&self, _ctx: &EngineContext, _name: &TableReference) -> bool {
unimplemented!()
}
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 6c77b28d52d4..67feb6226902 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -106,7 +106,7 @@ fn get_test_store_config(
(config, Some(TempDirGuard::S3(TempFolder::new(&store, "/"))))
}
StorageType::File => {
- let data_tmp_dir = TempDir::new(&format!("gt_data_{}", name)).unwrap();
+ let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
(
ObjectStoreConfig::File {
@@ -142,7 +142,7 @@ pub fn create_tmp_dir_and_datanode_opts(
store_type: StorageType,
name: &str,
) -> (DatanodeOptions, TestGuard) {
- let wal_tmp_dir = TempDir::new(&format!("gt_wal_{}", name)).unwrap();
+ let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
let (storage, data_tmp_dir) = get_test_store_config(&store_type, name);
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 0d1bd56cf95f..c70dfa3506cf 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -41,7 +41,7 @@ impl Environment for Env {
match mode {
"standalone" => Self::start_standalone().await,
"distributed" => Self::start_distributed().await,
- _ => panic!("Unexpected mode: {}", mode),
+ _ => panic!("Unexpected mode: {mode}"),
}
}
@@ -75,7 +75,7 @@ impl Env {
.write(true)
.truncate(true)
.open(SERVER_LOG_FILE)
- .unwrap_or_else(|_| panic!("Cannot open log file at {}", SERVER_LOG_FILE));
+ .unwrap_or_else(|_| panic!("Cannot open log file at {SERVER_LOG_FILE}"));
// Start the DB
let server_process = Command::new("./greptime")
.current_dir(util::get_binary_dir("debug"))
@@ -88,10 +88,7 @@ impl Env {
if !is_up {
panic!("Server doesn't up in 10 seconds, quit.")
}
- println!(
- "Started, going to test. Log will be write to {}",
- SERVER_LOG_FILE
- );
+ println!("Started, going to test. Log will be write to {SERVER_LOG_FILE}");
let client = Client::with_urls(vec![SERVER_ADDR]);
let db = DB::new("greptime", client.clone());
@@ -143,10 +140,10 @@ impl Display for ResultDisplayer {
)
}
ObjectResult::Mutate(mutate_result) => {
- write!(f, "{:?}", mutate_result)
+ write!(f, "{mutate_result:?}")
}
},
- Err(e) => write!(f, "Failed to execute, error: {:?}", e),
+ Err(e) => write!(f, "Failed to execute, error: {e:?}"),
}
}
}
diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs
index f417f19968e2..652e1257bbd0 100644
--- a/tests/runner/src/util.rs
+++ b/tests/runner/src/util.rs
@@ -124,7 +124,7 @@ pub fn values_to_string(
values
.binary_values
.into_iter()
- .map(|val| format!("{:?}", val)),
+ .map(|val| format!("{val:?}")),
row_count
),
ColumnDataType::Datetime => {
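Most of the hunks in the diff above are a single mechanical change: after moving to the 2022-12-20 nightly toolchain, positional arguments in format!-family macros are replaced by identifiers captured directly inside the format string (the pattern clippy's uninlined_format_args lint pushes toward). A minimal before/after sketch of the idiom, with made-up names rather than code from the repository:

    fn describe(table_name: &str, version: u32) -> String {
        // Old style: positional `{}` placeholders with arguments listed after the string.
        let old = format!("table {} is at schema version {}", table_name, version);
        // New style, same output: the identifiers are captured inline in the string,
        // which is what most hunks in the diff above switch to.
        let new = format!("table {table_name} is at schema version {version}");
        assert_eq!(old, new);
        new
    }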
| chore | upgrade Rust to nightly 2022-12-20 (#772) |
| 606b489d532c4964e565b732c3cef0b0fdb8bfb9 | 2023-08-14 12:10:00 | LFC | feat: redact secrets in sql when logging (#2141) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 96bb529ff0ee..4cc0cbc49e52 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9221,6 +9221,7 @@ dependencies = [
"hex",
"itertools 0.10.5",
"once_cell",
+ "regex",
"snafu",
"sqlparser 0.34.0",
]
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 576c431ee603..00cbe4e0be0d 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -44,7 +44,7 @@ use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::key::TableMetadataManager;
use common_query::Output;
use common_telemetry::logging::{debug, info};
-use common_telemetry::timer;
+use common_telemetry::{error, timer};
use datanode::instance::sql::table_idents_to_full_name;
use datanode::instance::InstanceRef as DnInstanceRef;
use datatypes::schema::Schema;
@@ -524,6 +524,9 @@ impl SqlQueryHandler for Instance {
results.push(output_result);
}
Err(e) => {
+ let redacted = sql::util::redact_sql_secrets(query.as_ref());
+ error!(e; "Failed to execute query: {redacted}");
+
results.push(Err(e));
break;
}
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index fb6d793fefe6..61668a8e01f3 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -15,7 +15,7 @@ use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Duration;
use ::auth::{Identity, Password, UserProviderRef};
use async_trait::async_trait;
@@ -23,7 +23,7 @@ use chrono::{NaiveDate, NaiveDateTime};
use common_catalog::parse_catalog_and_schema_from_db_string;
use common_error::ext::ErrorExt;
use common_query::Output;
-use common_telemetry::{error, info, logging, timer, warn};
+use common_telemetry::{error, logging, timer, warn};
use datatypes::prelude::ConcreteDataType;
use metrics::increment_counter;
use opensrv_mysql::{
@@ -91,27 +91,16 @@ impl MysqlInstanceShim {
}
async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
- let trace_id = query_ctx.trace_id();
- info!("Start executing query: '{}'", query);
- let start = Instant::now();
-
- let output = if let Some(output) = crate::mysql::federated::check(query, query_ctx.clone())
- {
+ if let Some(output) = crate::mysql::federated::check(query, query_ctx.clone()) {
vec![Ok(output)]
} else {
+ let trace_id = query_ctx.trace_id();
common_telemetry::TRACE_ID
.scope(trace_id, async move {
self.query_handler.do_query(query, query_ctx).await
})
.await
- };
-
- info!(
- "Finished executing query: '{}', total time costs in microseconds: {}",
- query,
- start.elapsed().as_micros()
- );
- output
+ }
}
/// Execute the logical plan and return the output
@@ -287,7 +276,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
Some(sql_plan) => sql_plan,
};
- let (query, outputs) = match sql_plan.plan {
+ let outputs = match sql_plan.plan {
Some(plan) => {
let param_types = plan
.get_param_types()
@@ -301,23 +290,19 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
}
let plan = replace_params_with_values(&plan, param_types, params)?;
logging::debug!("Mysql execute prepared plan: {}", plan.display_indent());
- let outputs = vec![
+ vec![
self.do_exec_plan(&sql_plan.query, plan, query_ctx.clone())
.await,
- ];
-
- (sql_plan.query, outputs)
+ ]
}
None => {
let query = replace_params(params, sql_plan.query);
logging::debug!("Mysql execute replaced query: {}", query);
- let outputs = self.do_query(&query, query_ctx.clone()).await;
-
- (query, outputs)
+ self.do_query(&query, query_ctx.clone()).await
}
};
- writer::write_output(w, &query, query_ctx, outputs).await?;
+ writer::write_output(w, query_ctx, outputs).await?;
Ok(())
}
@@ -347,7 +332,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
]
);
let outputs = self.do_query(query, query_ctx.clone()).await;
- writer::write_output(writer, query, query_ctx, outputs).await?;
+ writer::write_output(writer, query_ctx, outputs).await?;
Ok(())
}
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 83a802dcd3e4..3a4ac348722e 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -16,7 +16,6 @@ use std::ops::Deref;
use common_query::Output;
use common_recordbatch::{util, RecordBatch};
-use common_telemetry::warn;
use datatypes::prelude::{ConcreteDataType, Value};
use datatypes::schema::SchemaRef;
use metrics::increment_counter;
@@ -31,9 +30,8 @@ use crate::error::{self, Error, Result};
use crate::metrics::*;
/// Try to write multiple output to the writer if possible.
-pub async fn write_output<'a, W: AsyncWrite + Send + Sync + Unpin>(
- w: QueryResultWriter<'a, W>,
- query: &str,
+pub async fn write_output<W: AsyncWrite + Send + Sync + Unpin>(
+ w: QueryResultWriter<'_, W>,
query_context: QueryContextRef,
outputs: Vec<Result<Output>>,
) -> Result<()> {
@@ -42,7 +40,7 @@ pub async fn write_output<'a, W: AsyncWrite + Send + Sync + Unpin>(
let result_writer = writer.take().context(error::InternalSnafu {
err_msg: "Sending multiple result set is unsupported",
})?;
- writer = result_writer.try_write_one(query, output).await?;
+ writer = result_writer.try_write_one(output).await?;
}
if let Some(result_writer) = writer {
@@ -75,7 +73,6 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
/// Try to write one result set. If there are more than one result set, return `Some`.
pub async fn try_write_one(
self,
- query: &str,
output: Result<Output>,
) -> Result<Option<MysqlResultWriter<'a, W>>> {
// We don't support sending multiple query result because the RowWriter's lifetime is bound to
@@ -91,16 +88,14 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
recordbatches,
schema,
};
- Self::write_query_result(query, query_result, self.writer, self.query_context)
- .await?;
+ Self::write_query_result(query_result, self.writer, self.query_context).await?;
}
Output::RecordBatches(recordbatches) => {
let query_result = QueryResult {
schema: recordbatches.schema(),
recordbatches: recordbatches.take(),
};
- Self::write_query_result(query, query_result, self.writer, self.query_context)
- .await?;
+ Self::write_query_result(query_result, self.writer, self.query_context).await?;
}
Output::AffectedRows(rows) => {
let next_writer = Self::write_affected_rows(self.writer, rows).await?;
@@ -110,7 +105,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
)));
}
},
- Err(error) => Self::write_query_error(query, error, self.writer).await?,
+ Err(error) => Self::write_query_error(error, self.writer).await?,
}
Ok(None)
}
@@ -135,7 +130,6 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
}
async fn write_query_result(
- query: &str,
query_result: QueryResult,
writer: QueryResultWriter<'a, W>,
query_context: QueryContextRef,
@@ -152,7 +146,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
row_writer.finish().await?;
Ok(())
}
- Err(error) => Self::write_query_error(query, error, writer).await,
+ Err(error) => Self::write_query_error(error, writer).await,
}
}
@@ -200,12 +194,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
Ok(())
}
- async fn write_query_error(
- query: &str,
- error: Error,
- w: QueryResultWriter<'a, W>,
- ) -> Result<()> {
- warn!(error; "Failed to execute query '{}'", query);
+ async fn write_query_error(error: Error, w: QueryResultWriter<'a, W>) -> Result<()> {
increment_counter!(
METRIC_ERROR_COUNTER,
&[(METRIC_PROTOCOL_LABEL, METRIC_ERROR_COUNTER_LABEL_MYSQL)]
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index 93f41e081ae9..ee85f93616dd 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -16,6 +16,7 @@ datatypes = { workspace = true }
hex = "0.4"
itertools.workspace = true
once_cell.workspace = true
+regex.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser.workspace = true
diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs
index ef9ef453fcff..3fe8d2953227 100644
--- a/src/sql/src/lib.rs
+++ b/src/sql/src/lib.rs
@@ -14,6 +14,7 @@
#![feature(box_patterns)]
#![feature(assert_matches)]
#![feature(let_chains)]
+#![feature(lazy_cell)]
pub mod ast;
pub mod dialect;
diff --git a/src/sql/src/util.rs b/src/sql/src/util.rs
index 0ad23221a365..f6e98a864836 100644
--- a/src/sql/src/util.rs
+++ b/src/sql/src/util.rs
@@ -13,9 +13,18 @@
// limitations under the License.
use std::collections::HashMap;
+use std::sync::LazyLock;
+use regex::Regex;
use sqlparser::ast::{SqlOption, Value};
+static SQL_SECRET_PATTERNS: LazyLock<Vec<Regex>> = LazyLock::new(|| {
+ vec![
+ Regex::new(r#"(?i)access_key_id=["'](\w*)["'].*"#).unwrap(),
+ Regex::new(r#"(?i)secret_access_key=["'](\w*)["'].*"#).unwrap(),
+ ]
+});
+
pub fn parse_option_string(value: Value) -> Option<String> {
match value {
Value::SingleQuotedString(v) | Value::DoubleQuotedString(v) => Some(v),
@@ -36,3 +45,31 @@ pub fn to_lowercase_options_map(opts: &[SqlOption]) -> HashMap<String, String> {
}
map
}
+
+/// Use regex to match and replace common seen secret values in SQL.
+pub fn redact_sql_secrets(sql: &str) -> String {
+ let mut s = sql.to_string();
+ for p in SQL_SECRET_PATTERNS.iter() {
+ if let Some(captures) = p.captures(&s) {
+ if let Some(m) = captures.get(1) {
+ s = s.replace(m.as_str(), "******");
+ }
+ }
+ }
+ s
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_redact_sql_secrets() {
+ assert_eq!(
+ redact_sql_secrets(
+ r#"COPY 'my_table' FROM '/test.orc' WITH (FORMAT = 'orc') CONNECTION(ENDPOINT = 's3.storage.site', REGION = 'hz', ACCESS_KEY_ID='my_key_id', SECRET_ACCESS_KEY="my_access_key");"#
+ ),
+ r#"COPY 'my_table' FROM '/test.orc' WITH (FORMAT = 'orc') CONNECTION(ENDPOINT = 's3.storage.site', REGION = 'hz', ACCESS_KEY_ID='******', SECRET_ACCESS_KEY="******");"#
+ );
+ }
+}
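As a usage sketch of the helper introduced above (only redact_sql_secrets comes from the diff; the wrapper function and its call site are hypothetical):

    use sql::util::redact_sql_secrets;

    // Hypothetical logging helper: log the redacted SQL instead of the raw statement.
    fn log_failed_query(sql: &str, err: &dyn std::fmt::Display) {
        let redacted = redact_sql_secrets(sql);
        // Values in ACCESS_KEY_ID='...' / SECRET_ACCESS_KEY="..." are replaced with "******".
        eprintln!("Failed to execute query: {redacted}, error: {err}");
    }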
| feat | redact secrets in sql when logging (#2141) |
| e35a494a3fe0cd32712b5490c4a0ef149afc9806 | 2023-12-14 19:57:36 | discord9 | test: fix a wronged test script (#2934) | false |
diff --git a/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs b/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs
index 4c3a4f09e8f7..f83aa2bf1868 100644
--- a/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs
+++ b/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs
@@ -613,7 +613,7 @@ def answer() -> vector[i64]:
except ImportError:
# Python didn't have pyarrow
print("Warning: no pyarrow in current python")
- return vector([42, 43, 44])
+ return vector([42])
a = vector.from_pyarrow(pa.array([42]))
return a[0:1]
"#
| test | fix a wronged test script (#2934) |
| 9b3037fe97a843ad47f90bec377f8a58ea490797 | 2023-06-14 13:20:21 | LFC | feat: a countdown task for closing region in Datanode (#1775) | false |
diff --git a/src/catalog/src/remote.rs b/src/catalog/src/remote.rs
index e8e1504a0266..03f068d09bd3 100644
--- a/src/catalog/src/remote.rs
+++ b/src/catalog/src/remote.rs
@@ -30,6 +30,10 @@ mod manager;
#[cfg(feature = "testing")]
pub mod mock;
+// FIXME(LFC): Used in next PR.
+#[allow(dead_code)]
+mod region_alive_keeper;
+
#[derive(Debug, Clone)]
pub struct Kv(pub Vec<u8>, pub Vec<u8>);
diff --git a/src/catalog/src/remote/mock.rs b/src/catalog/src/remote/mock.rs
index e8bd73f93adb..a975ab64a45e 100644
--- a/src/catalog/src/remote/mock.rs
+++ b/src/catalog/src/remote/mock.rs
@@ -27,9 +27,11 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
use serde::Serializer;
-use table::engine::{EngineContext, TableEngine, TableReference};
+use table::engine::{CloseTableResult, EngineContext, TableEngine, TableReference};
use table::metadata::TableId;
-use table::requests::{AlterTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest};
+use table::requests::{
+ AlterTableRequest, CloseTableRequest, CreateTableRequest, DropTableRequest, OpenTableRequest,
+};
use table::test_util::MemTable;
use table::TableRef;
use tokio::sync::RwLock;
@@ -183,6 +185,8 @@ impl TableEngine for MockTableEngine {
let table_name = request.table_name.clone();
let catalog_name = request.catalog_name.clone();
let schema_name = request.schema_name.clone();
+ let table_full_name =
+ TableReference::full(&catalog_name, &schema_name, &table_name).to_string();
let default_table_id = "0".to_owned();
let table_id = TableId::from_str(
@@ -211,7 +215,7 @@ impl TableEngine for MockTableEngine {
)) as Arc<_>;
let mut tables = self.tables.write().await;
- tables.insert(table_name, table.clone() as TableRef);
+ tables.insert(table_full_name, table.clone() as TableRef);
Ok(table)
}
@@ -263,6 +267,19 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
+ async fn close_table(
+ &self,
+ _ctx: &EngineContext,
+ request: CloseTableRequest,
+ ) -> table::Result<CloseTableResult> {
+ let _ = self
+ .tables
+ .write()
+ .await
+ .remove(&request.table_ref().to_string());
+ Ok(CloseTableResult::Released(vec![]))
+ }
+
async fn close(&self) -> table::Result<()> {
Ok(())
}
diff --git a/src/catalog/src/remote/region_alive_keeper.rs b/src/catalog/src/remote/region_alive_keeper.rs
new file mode 100644
index 000000000000..a291fe9337de
--- /dev/null
+++ b/src/catalog/src/remote/region_alive_keeper.rs
@@ -0,0 +1,329 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_meta::instruction::TableIdent;
+use common_telemetry::{debug, error, info, warn};
+use store_api::storage::RegionNumber;
+use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
+use table::requests::CloseTableRequest;
+use tokio::sync::mpsc;
+use tokio::task::JoinHandle;
+use tokio::time::{Duration, Instant};
+
+#[derive(Debug)]
+enum CountdownCommand {
+ Start(u64),
+ Reset(Instant),
+
+ #[cfg(test)]
+ Deadline(tokio::sync::oneshot::Sender<Instant>),
+}
+
+struct CountdownTaskHandle {
+ tx: mpsc::Sender<CountdownCommand>,
+ handler: JoinHandle<()>,
+}
+
+impl CountdownTaskHandle {
+ /// Creates a new [CountdownTaskHandle] and starts the countdown task.
+ /// # Params
+ /// - `on_task_finished`: a callback to be invoked when the task is finished. Note that it will not
+ /// be invoked if the task is cancelled (by dropping the handle). This is because we want something
+ /// meaningful to be done when the task is finished, e.g. deregister the handle from the map.
+ /// While dropping the handle does not necessarily mean the task is finished.
+ fn new<F>(
+ table_engine: TableEngineRef,
+ table_ident: TableIdent,
+ region: RegionNumber,
+ on_task_finished: F,
+ ) -> Self
+ where
+ F: FnOnce() + Send + 'static,
+ {
+ let (tx, rx) = mpsc::channel(1024);
+
+ let mut countdown_task = CountdownTask {
+ table_engine,
+ table_ident,
+ region,
+ rx,
+ };
+ let handler = common_runtime::spawn_bg(async move {
+ countdown_task.run(on_task_finished).await;
+ });
+
+ Self { tx, handler }
+ }
+
+ async fn start(&self, heartbeat_interval_millis: u64) {
+ if let Err(e) = self
+ .tx
+ .send(CountdownCommand::Start(heartbeat_interval_millis))
+ .await
+ {
+ warn!(
+ "Failed to start region alive keeper countdown: {e}. \
+ Maybe the task is stopped due to region been closed."
+ );
+ }
+ }
+
+ async fn reset_deadline(&self, deadline: Instant) {
+ if let Err(e) = self.tx.send(CountdownCommand::Reset(deadline)).await {
+ warn!(
+ "Failed to reset region alive keeper deadline: {e}. \
+ Maybe the task is stopped due to region been closed."
+ );
+ }
+ }
+}
+
+impl Drop for CountdownTaskHandle {
+ fn drop(&mut self) {
+ self.handler.abort()
+ }
+}
+
+struct CountdownTask {
+ table_engine: TableEngineRef,
+ table_ident: TableIdent,
+ region: RegionNumber,
+ rx: mpsc::Receiver<CountdownCommand>,
+}
+
+impl CountdownTask {
+ async fn run<F>(&mut self, on_task_finished: F)
+ where
+ F: FnOnce() + Send + 'static,
+ {
+ // 30 years. See `Instant::far_future`.
+ let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
+
+ // Make sure the alive countdown is not gonna happen before heartbeat task is started (the
+ // "start countdown" command will be sent from heartbeat task).
+ let countdown = tokio::time::sleep_until(far_future);
+ tokio::pin!(countdown);
+
+ let region = &self.region;
+ let table_ident = &self.table_ident;
+ loop {
+ tokio::select! {
+ command = self.rx.recv() => {
+ match command {
+ Some(CountdownCommand::Start(heartbeat_interval_millis)) => {
+ // Set first deadline in 4 heartbeats (roughly after 20 seconds from now if heartbeat
+ // interval is set to default 5 seconds), to make Datanode and Metasrv more tolerable to
+ // network or other jitters during startup.
+ let first_deadline = Instant::now() + Duration::from_millis(heartbeat_interval_millis) * 4;
+ countdown.set(tokio::time::sleep_until(first_deadline));
+ },
+ Some(CountdownCommand::Reset(deadline)) => {
+ if countdown.deadline() < deadline {
+ debug!("Reset deadline to region {region} of table {table_ident} to {deadline:?}");
+ countdown.set(tokio::time::sleep_until(deadline));
+ }
+ // Else we have received a past deadline, it could be the following
+ // possible reasons:
+ // 1. the clock drift happened in Metasrv or Datanode;
+ // 2. some messages are lagged;
+ // 3. during the period of Datanode startup.
+ // We can safely ignore case 2 and 3. However, case 1 is catastrophic.
+ // We must think of a way to resolve it, maybe using logical clock, or
+ // simply fire an alarm for it? For now, we can tolerate that, because it's
+ // seconds resolution to deadline, and clock drift is less likely
+ // to happen in that resolution.
+ },
+ None => {
+ info!(
+ "The handle of countdown task for region {region} of table {table_ident} \
+ is dropped, RegionAliveKeeper out."
+ );
+ break;
+ },
+
+ #[cfg(test)]
+ Some(CountdownCommand::Deadline(tx)) => {
+ tx.send(countdown.deadline()).unwrap()
+ }
+ }
+ }
+ () = &mut countdown => {
+ let result = self.close_region().await;
+ warn!(
+ "Region {region} of table {table_ident} is closed, result: {result:?}. \
+ RegionAliveKeeper out.",
+ );
+ break;
+ }
+ }
+ }
+
+ on_task_finished();
+ }
+
+ async fn close_region(&self) -> CloseTableResult {
+ let ctx = EngineContext::default();
+ let region = self.region;
+ let table_ident = &self.table_ident;
+ loop {
+ let request = CloseTableRequest {
+ catalog_name: table_ident.catalog.clone(),
+ schema_name: table_ident.schema.clone(),
+ table_name: table_ident.table.clone(),
+ region_numbers: vec![region],
+ flush: true,
+ };
+ match self.table_engine.close_table(&ctx, request).await {
+ Ok(result) => return result,
+ // If region is failed to close, immediately retry. Maybe we should panic instead?
+ Err(e) => error!(e;
+ "Failed to close region {region} of table {table_ident}. \
+ For the integrity of data, retry closing and retry without wait.",
+ ),
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use std::sync::atomic::{AtomicBool, Ordering};
+ use std::sync::Arc;
+
+ use datatypes::schema::RawSchema;
+ use table::engine::{TableEngine, TableReference};
+ use table::requests::{CreateTableRequest, TableOptions};
+
+ use super::*;
+ use crate::remote::mock::MockTableEngine;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_countdown_task_handle() {
+ let table_engine = Arc::new(MockTableEngine::default());
+ let table_ident = TableIdent {
+ catalog: "my_catalog".to_string(),
+ schema: "my_schema".to_string(),
+ table: "my_table".to_string(),
+ table_id: 1024,
+ engine: "mito".to_string(),
+ };
+ let finished = Arc::new(AtomicBool::new(false));
+ let finished_clone = finished.clone();
+ let handle =
+ CountdownTaskHandle::new(table_engine.clone(), table_ident.clone(), 1, move || {
+ finished_clone.store(true, Ordering::Relaxed)
+ });
+ let tx = handle.tx.clone();
+
+ // assert countdown task is running
+ assert!(tx.send(CountdownCommand::Start(5000)).await.is_ok());
+ assert!(!finished.load(Ordering::Relaxed));
+
+ drop(handle);
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ // assert countdown task is stopped
+ assert!(tx
+ .try_send(CountdownCommand::Reset(
+ Instant::now() + Duration::from_secs(10)
+ ))
+ .is_err());
+ // assert `on_task_finished` is not called (because the task is aborted by the handle's drop)
+ assert!(!finished.load(Ordering::Relaxed));
+
+ let finished = Arc::new(AtomicBool::new(false));
+ let finished_clone = finished.clone();
+ let handle = CountdownTaskHandle::new(table_engine, table_ident, 1, move || {
+ finished_clone.store(true, Ordering::Relaxed)
+ });
+ handle.tx.send(CountdownCommand::Start(100)).await.unwrap();
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ // assert `on_task_finished` is called when task is finished normally
+ assert!(finished.load(Ordering::Relaxed));
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_countdown_task_run() {
+ let ctx = &EngineContext::default();
+ let catalog = "my_catalog";
+ let schema = "my_schema";
+ let table = "my_table";
+ let request = CreateTableRequest {
+ id: 1,
+ catalog_name: catalog.to_string(),
+ schema_name: schema.to_string(),
+ table_name: table.to_string(),
+ desc: None,
+ schema: RawSchema {
+ column_schemas: vec![],
+ timestamp_index: None,
+ version: 0,
+ },
+ region_numbers: vec![],
+ primary_key_indices: vec![],
+ create_if_not_exists: false,
+ table_options: TableOptions::default(),
+ engine: "mito".to_string(),
+ };
+ let table_ref = TableReference::full(catalog, schema, table);
+
+ let table_engine = Arc::new(MockTableEngine::default());
+ table_engine.create_table(ctx, request).await.unwrap();
+
+ let table_ident = TableIdent {
+ catalog: catalog.to_string(),
+ schema: schema.to_string(),
+ table: table.to_string(),
+ table_id: 1024,
+ engine: "mito".to_string(),
+ };
+ let (tx, rx) = mpsc::channel(10);
+ let mut task = CountdownTask {
+ table_engine: table_engine.clone(),
+ table_ident,
+ region: 1,
+ rx,
+ };
+ common_runtime::spawn_bg(async move {
+ task.run(|| ()).await;
+ });
+
+ async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
+ let (s, r) = tokio::sync::oneshot::channel();
+ tx.send(CountdownCommand::Deadline(s)).await.unwrap();
+ r.await.unwrap()
+ }
+
+ // if countdown task is not started, its deadline is set to far future
+ assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));
+
+ // start countdown in 250ms * 4 = 1s
+ tx.send(CountdownCommand::Start(250)).await.unwrap();
+ // assert deadline is correctly set
+ assert!(deadline(&tx).await <= Instant::now() + Duration::from_secs(1));
+
+ // reset countdown in 1.5s
+ tx.send(CountdownCommand::Reset(
+ Instant::now() + Duration::from_millis(1500),
+ ))
+ .await
+ .unwrap();
+
+ // assert the table is closed after deadline is reached
+ assert!(table_engine.table_exists(ctx, &table_ref));
+ // spare 500ms for the task to close the table
+ tokio::time::sleep(Duration::from_millis(2000)).await;
+ assert!(!table_engine.table_exists(ctx, &table_ref));
+ }
+}
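The countdown task above boils down to a pinned tokio `Sleep` whose deadline is only ever pushed forward by incoming commands; when it finally elapses, the region is closed. A condensed sketch of that reset pattern with the table-engine specifics stripped out (assumes only tokio with the `macros`, `rt-multi-thread` and `time` features; the `Cmd` enum and timings are illustrative):

use tokio::sync::mpsc;
use tokio::time::{sleep_until, Duration, Instant};

enum Cmd {
    Reset(Instant),
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Cmd>(16);

    let task = tokio::spawn(async move {
        // First deadline; in the commit this is set by the Start command sent
        // from the heartbeat task.
        let countdown = sleep_until(Instant::now() + Duration::from_millis(300));
        tokio::pin!(countdown);

        loop {
            tokio::select! {
                cmd = rx.recv() => match cmd {
                    // Only accept deadlines later than the current one, mirroring
                    // the `countdown.deadline() < deadline` check above.
                    Some(Cmd::Reset(deadline)) if countdown.deadline() < deadline => {
                        countdown.set(sleep_until(deadline));
                    }
                    Some(Cmd::Reset(_)) => { /* earlier deadline: ignore */ }
                    None => break, // all senders dropped: stop the task
                },
                () = &mut countdown => {
                    println!("deadline reached, close the region here");
                    break;
                }
            }
        }
    });

    // Push the deadline forward once, then stop sending and let it expire.
    tx.send(Cmd::Reset(Instant::now() + Duration::from_millis(800)))
        .await
        .unwrap();
    task.await.unwrap();
}

Dropping the handle in the commit aborts this task outright, which is why the `on_task_finished` callback only runs when the countdown (or the channel) actually finishes.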
|
feat
|
a countdown task for closing region in Datanode (#1775)
|
a52aedec5bc050957dee792bb17a4b045cdbfc4d
|
2024-03-15 11:45:18
|
Weny Xu
|
feat: implement the drop database parser (#3521)
| false
|
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index fb0d5f991391..f8402e1ccb70 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -473,7 +473,8 @@ pub fn check_permission(
// These are executed by query engine, and will be checked there.
Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) | Statement::Delete(_) => {}
// database ops won't be checked
- Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => {}
+ Statement::CreateDatabase(_) | Statement::ShowDatabases(_) | Statement::DropDatabase(_) => {
+ }
// show create table and alter are not supported yet
Statement::ShowCreateTable(_) | Statement::CreateExternalTable(_) | Statement::Alter(_) => {
}
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 5231f99a58ae..fc194e1496e5 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -171,6 +171,13 @@ impl StatementExecutor {
let table_name = TableName::new(catalog, schema, table);
self.drop_table(table_name, stmt.drop_if_exists()).await
}
+ Statement::DropDatabase(_stmt) => {
+ // TODO(weny): implement the drop database procedure
+ error::NotSupportedSnafu {
+ feat: "Drop Database",
+ }
+ .fail()
+ }
Statement::TruncateTable(stmt) => {
let (catalog, schema, table) =
table_idents_to_full_name(stmt.table_name(), &query_ctx)
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index b84c965aa5f7..ec61a20d5378 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -119,7 +119,7 @@ impl<'a> ParserContext<'a> {
expected: "a database name",
actual: self.peek_token_as_string(),
})?;
-
+ let database_name = Self::canonicalize_object_name(database_name);
Ok(Statement::CreateDatabase(CreateDatabase {
name: database_name,
if_not_exists,
@@ -722,7 +722,7 @@ mod tests {
use common_catalog::consts::FILE_ENGINE;
use common_error::ext::ErrorExt;
use sqlparser::ast::ColumnOption::NotNull;
- use sqlparser::ast::{BinaryOperator, Value};
+ use sqlparser::ast::{BinaryOperator, ObjectName, Value};
use super::*;
use crate::dialect::GreptimeDbDialect;
@@ -916,6 +916,18 @@ mod tests {
}
_ => unreachable!(),
}
+
+ let sql = "CREATE DATABASE `fOo`";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
+ let mut stmts = result.unwrap();
+ assert_eq!(
+ stmts.pop().unwrap(),
+ Statement::CreateDatabase(CreateDatabase::new(
+ ObjectName(vec![Ident::with_quote('`', "fOo"),]),
+ false
+ ))
+ );
}
#[test]
diff --git a/src/sql/src/parsers/drop_parser.rs b/src/sql/src/parsers/drop_parser.rs
index ca4f7d0eb909..d5d872ee1689 100644
--- a/src/sql/src/parsers/drop_parser.rs
+++ b/src/sql/src/parsers/drop_parser.rs
@@ -13,20 +13,29 @@
// limitations under the License.
use snafu::{ensure, ResultExt};
-use sqlparser::keywords::Keyword;
+use sqlparser::dialect::keywords::Keyword;
+use sqlparser::tokenizer::Token;
use crate::error::{self, InvalidTableNameSnafu, Result};
use crate::parser::ParserContext;
-use crate::statements::drop::DropTable;
+use crate::statements::drop::{DropDatabase, DropTable};
use crate::statements::statement::Statement;
/// DROP statement parser implementation
impl<'a> ParserContext<'a> {
pub(crate) fn parse_drop(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();
- if !self.matches_keyword(Keyword::TABLE) {
- return self.unsupported(self.peek_token_as_string());
+ match self.parser.peek_token().token {
+ Token::Word(w) => match w.keyword {
+ Keyword::TABLE => self.parse_drop_table(),
+ Keyword::SCHEMA | Keyword::DATABASE => self.parse_drop_database(),
+ _ => self.unsupported(w.to_string()),
+ },
+ unexpected => self.unsupported(unexpected.to_string()),
}
+ }
+
+ fn parse_drop_table(&mut self) -> Result<Statement> {
let _ = self.parser.next_token();
let if_exists = self.parser.parse_keywords(&[Keyword::IF, Keyword::EXISTS]);
@@ -48,6 +57,26 @@ impl<'a> ParserContext<'a> {
Ok(Statement::DropTable(DropTable::new(table_ident, if_exists)))
}
+
+ fn parse_drop_database(&mut self) -> Result<Statement> {
+ let _ = self.parser.next_token();
+
+ let if_exists = self.parser.parse_keywords(&[Keyword::IF, Keyword::EXISTS]);
+ let database_name =
+ self.parser
+ .parse_object_name()
+ .with_context(|_| error::UnexpectedSnafu {
+ sql: self.sql,
+ expected: "a database name",
+ actual: self.peek_token_as_string(),
+ })?;
+ let database_name = Self::canonicalize_object_name(database_name);
+
+ Ok(Statement::DropDatabase(DropDatabase::new(
+ database_name,
+ if_exists,
+ )))
+ }
}
#[cfg(test)]
@@ -106,4 +135,43 @@ mod tests {
))
)
}
+
+ #[test]
+ pub fn test_drop_database() {
+ let sql = "DROP DATABASE public";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
+ let mut stmts = result.unwrap();
+ assert_eq!(
+ stmts.pop().unwrap(),
+ Statement::DropDatabase(DropDatabase::new(
+ ObjectName(vec![Ident::new("public")]),
+ false
+ ))
+ );
+
+ let sql = "DROP DATABASE IF EXISTS public";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
+ let mut stmts = result.unwrap();
+ assert_eq!(
+ stmts.pop().unwrap(),
+ Statement::DropDatabase(DropDatabase::new(
+ ObjectName(vec![Ident::new("public")]),
+ true
+ ))
+ );
+
+ let sql = "DROP DATABASE `fOo`";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
+ let mut stmts = result.unwrap();
+ assert_eq!(
+ stmts.pop().unwrap(),
+ Statement::DropDatabase(DropDatabase::new(
+ ObjectName(vec![Ident::with_quote('`', "fOo"),]),
+ false
+ ))
+ );
+ }
}
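With this change, `DROP SCHEMA` and `DROP DATABASE` both route into `parse_drop_database`, and the database name is canonicalized the same way as in `CREATE DATABASE`. A short sketch of consuming the parser output, reusing only the items that appear in this diff (`ParserContext::create_with_dialect`, `GreptimeDbDialect`, `ParseOptions`, and the `DropDatabase` accessors); the import paths and the `main` harness are assumptions based on the crate layout shown here:

use sql::dialect::GreptimeDbDialect;
use sql::parser::{ParseOptions, ParserContext};
use sql::statements::statement::Statement;

fn main() {
    let sql = "DROP DATABASE IF EXISTS my_db";
    let stmts =
        ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
            .unwrap();
    match &stmts[0] {
        Statement::DropDatabase(d) => {
            // `name()` returns the canonicalized database name and
            // `drop_if_exists()` reflects the optional IF EXISTS clause.
            println!("dropping {} (if exists: {})", d.name(), d.drop_if_exists());
        }
        _ => println!("unexpected statement kind"),
    }
}

Executing the statement is still unsupported at this point: as the `statement.rs` hunk above shows, the executor returns a NotSupported error until the drop-database procedure lands.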
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index e665ef257750..cfcbd8d68242 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -206,6 +206,16 @@ pub struct CreateDatabase {
pub if_not_exists: bool,
}
+impl CreateDatabase {
+ /// Creates a statement for `CREATE DATABASE`
+ pub fn new(name: ObjectName, if_not_exists: bool) -> Self {
+ Self {
+ name,
+ if_not_exists,
+ }
+ }
+}
+
#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct CreateExternalTable {
/// Table name
diff --git a/src/sql/src/statements/drop.rs b/src/sql/src/statements/drop.rs
index d5cf364a4cfa..62da68a90c9c 100644
--- a/src/sql/src/statements/drop.rs
+++ b/src/sql/src/statements/drop.rs
@@ -40,3 +40,29 @@ impl DropTable {
self.drop_if_exists
}
}
+
+/// DROP DATABASE statement.
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+pub struct DropDatabase {
+ name: ObjectName,
+ /// drop table if exists
+ drop_if_exists: bool,
+}
+
+impl DropDatabase {
+ /// Creates a statement for `DROP DATABASE`
+ pub fn new(name: ObjectName, if_exists: bool) -> Self {
+ Self {
+ name,
+ drop_if_exists: if_exists,
+ }
+ }
+
+ pub fn name(&self) -> &ObjectName {
+ &self.name
+ }
+
+ pub fn drop_if_exists(&self) -> bool {
+ self.drop_if_exists
+ }
+}
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index b8af789616d1..edb9396aaab7 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -16,6 +16,7 @@ use datafusion_sql::parser::Statement as DfStatement;
use sqlparser::ast::Statement as SpStatement;
use sqlparser_derive::{Visit, VisitMut};
+use super::drop::DropDatabase;
use super::show::ShowVariables;
use crate::error::{ConvertToDfStatementSnafu, Error};
use crate::statements::alter::AlterTable;
@@ -51,6 +52,8 @@ pub enum Statement {
CreateTableLike(CreateTableLike),
// DROP TABLE
DropTable(DropTable),
+ // DROP DATABASE
+ DropDatabase(DropDatabase),
// CREATE DATABASE
CreateDatabase(CreateDatabase),
/// ALTER TABLE
diff --git a/tests/cases/standalone/common/catalog/schema.result b/tests/cases/standalone/common/catalog/schema.result
index 8a385b7e171f..4c0a29be1f7f 100644
--- a/tests/cases/standalone/common/catalog/schema.result
+++ b/tests/cases/standalone/common/catalog/schema.result
@@ -120,7 +120,7 @@ SHOW TABLES FROM public WHERE Tables = 'numbers';
DROP SCHEMA test_public_schema;
-Error: 1001(Unsupported), SQL statement is not supported: DROP SCHEMA test_public_schema;, keyword: SCHEMA
+Error: 1001(Unsupported), Not supported: Drop Database
SELECT * FROM test_public_schema.hello;
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index 2a1d2d49f772..23764d8c2bd9 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -447,7 +447,7 @@ Affected Rows: 0
drop schema my_db;
-Error: 1001(Unsupported), SQL statement is not supported: drop schema my_db;, keyword: schema
+Error: 1001(Unsupported), Not supported: Drop Database
use information_schema;
|
feat
|
implement the drop database parser (#3521)
|
d86b3386dc1ed1be53964e1eed9e27f4efcd8fe5
|
2023-05-05 08:59:09
|
Niwaka
|
fix: incorrect show create table output (#1514)
| false
|
diff --git a/src/query/src/sql/show.rs b/src/query/src/sql/show.rs
index f3016747825e..051874a48b0a 100644
--- a/src/query/src/sql/show.rs
+++ b/src/query/src/sql/show.rs
@@ -25,6 +25,7 @@ use sql::parser::ParserContext;
use sql::statements::create::{CreateTable, TIME_INDEX};
use sql::statements::{self};
use table::metadata::{TableInfoRef, TableMeta};
+use table::requests::IMMUTABLE_TABLE_META_KEY;
use crate::error::{ConvertSqlTypeSnafu, ConvertSqlValueSnafu, Result, SqlSnafu};
@@ -74,7 +75,11 @@ fn create_sql_options(table_meta: &TableMeta) -> Vec<SqlOption> {
options.push(sql_option("compaction_time_window", number_value(w)));
}
- for (k, v) in &table_opts.extra_options {
+ for (k, v) in table_opts
+ .extra_options
+ .iter()
+ .filter(|(k, _)| k != &IMMUTABLE_TABLE_META_KEY)
+ {
options.push(sql_option(k, string_value(v)));
}
@@ -184,6 +189,10 @@ mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{Schema, SchemaRef};
use table::metadata::*;
+ use table::requests::{
+ TableOptions, IMMUTABLE_TABLE_FORMAT_KEY, IMMUTABLE_TABLE_LOCATION_KEY,
+ IMMUTABLE_TABLE_META_KEY,
+ };
use super::*;
@@ -255,6 +264,72 @@ CREATE TABLE IF NOT EXISTS system_metrics (
ENGINE=mito
WITH(
regions = 3
+)"#,
+ sql
+ );
+ }
+
+ #[test]
+ fn test_show_create_external_table_sql() {
+ let schema = vec![
+ ColumnSchema::new("host", ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
+ ];
+ let table_schema = SchemaRef::new(Schema::new(schema));
+ let table_name = "system_metrics";
+ let schema_name = "public".to_string();
+ let catalog_name = "greptime".to_string();
+ let mut options: TableOptions = Default::default();
+ options.extra_options.insert(
+ IMMUTABLE_TABLE_LOCATION_KEY.to_string(),
+ "foo.csv".to_string(),
+ );
+ options.extra_options.insert(
+ IMMUTABLE_TABLE_META_KEY.to_string(),
+ "{{\"files\":[\"foo.csv\"]}}".to_string(),
+ );
+ options
+ .extra_options
+ .insert(IMMUTABLE_TABLE_FORMAT_KEY.to_string(), "csv".to_string());
+ let meta = TableMetaBuilder::default()
+ .schema(table_schema)
+ .primary_key_indices(vec![])
+ .engine("file".to_string())
+ .next_column_id(0)
+ .engine_options(Default::default())
+ .options(options)
+ .created_on(Default::default())
+ .build()
+ .unwrap();
+
+ let info = Arc::new(
+ TableInfoBuilder::default()
+ .table_id(1024)
+ .table_version(0 as TableVersion)
+ .name(table_name)
+ .schema_name(schema_name)
+ .catalog_name(catalog_name)
+ .desc(None)
+ .table_type(TableType::Base)
+ .meta(meta)
+ .build()
+ .unwrap(),
+ );
+
+ let stmt = create_table_stmt(&info).unwrap();
+
+ let sql = format!("\n{}", stmt);
+ assert_eq!(
+ r#"
+CREATE EXTERNAL TABLE IF NOT EXISTS system_metrics (
+ host STRING NULL,
+ cpu DOUBLE NULL,
+
+)
+ENGINE=file
+WITH(
+ FORMAT = 'csv',
+ LOCATION = 'foo.csv'
)"#,
sql
);
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 4a9eb00337bb..431742db6c91 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -15,6 +15,7 @@
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
+use common_catalog::consts::IMMUTABLE_FILE_ENGINE;
use itertools::Itertools;
use crate::ast::{ColumnDef, Ident, ObjectName, SqlOption, TableConstraint, Value as SqlValue};
@@ -110,7 +111,8 @@ impl CreateTable {
if self.options.is_empty() {
"".to_string()
} else {
- let options = format_list_indent!(self.options);
+ let options: Vec<&SqlOption> = self.options.iter().sorted().collect();
+ let options = format_list_indent!(options);
format!(
r#"WITH(
{options}
@@ -165,10 +167,14 @@ impl Display for CreateTable {
let partitions = self.format_partitions();
let engine = &self.engine;
let options = self.format_options();
-
+ let maybe_external = if self.engine == IMMUTABLE_FILE_ENGINE {
+ "EXTERNAL "
+ } else {
+ ""
+ };
write!(
f,
- r#"CREATE TABLE {if_not_exists} {name} (
+ r#"CREATE {maybe_external}TABLE {if_not_exists} {name} (
{columns},
{constraints}
)
diff --git a/tests/cases/standalone/show/show_create.result b/tests/cases/standalone/show/show_create.result
index f6b07ba640a7..763bf3e9d60c 100644
--- a/tests/cases/standalone/show/show_create.result
+++ b/tests/cases/standalone/show/show_create.result
@@ -32,8 +32,8 @@ SHOW CREATE TABLE system_metrics;
| | ENGINE=mito |
| | WITH( |
| | regions = 1, |
-| | write_buffer_size = '1.0KiB', |
-| | ttl = '7days' |
+| | ttl = '7days', |
+| | write_buffer_size = '1.0KiB' |
| | ) |
+----------------+---------------------------------------------------------+
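The fix above makes `SHOW CREATE TABLE` deterministic by emitting WITH options in sorted order and by filtering out the internal `IMMUTABLE_TABLE_META_KEY` entry. A standalone sketch of that sort-and-filter idea over plain string options (the constant's value below is a placeholder; the real key lives in `table::requests`):

use std::collections::HashMap;

// Placeholder value for illustration; the actual key is defined in table::requests.
const IMMUTABLE_TABLE_META_KEY: &str = "__meta";

fn format_with_options(opts: &HashMap<String, String>) -> String {
    // Skip internal bookkeeping keys, then sort for stable output.
    let mut pairs: Vec<_> = opts
        .iter()
        .filter(|(k, _)| k.as_str() != IMMUTABLE_TABLE_META_KEY)
        .collect();
    pairs.sort_by(|a, b| a.0.cmp(b.0));
    let body = pairs
        .iter()
        .map(|(k, v)| format!("  {k} = '{v}'"))
        .collect::<Vec<_>>()
        .join(",\n");
    format!("WITH(\n{body}\n)")
}

fn main() {
    let mut opts = HashMap::new();
    opts.insert("ttl".to_string(), "7days".to_string());
    opts.insert("write_buffer_size".to_string(), "1.0KiB".to_string());
    opts.insert(IMMUTABLE_TABLE_META_KEY.to_string(), "hidden".to_string());
    // Prints:
    // WITH(
    //   ttl = '7days',
    //   write_buffer_size = '1.0KiB'
    // )
    println!("{}", format_with_options(&opts));
}

Sorting before formatting is also what keeps the sqlness expectation above stable regardless of the underlying map's iteration order.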
|
fix
|
incorrect show create table output (#1514)
|
9e33ddceeaf14a87f12f3a502d22eda734608533
|
2023-10-12 08:23:42
|
Ning Sun
|
ci: run windows tests every night instead of every commit (#2577)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 23e81f56679c..f2d9b27bcb0a 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -80,7 +80,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
+ os: [ ubuntu-latest-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -188,43 +188,3 @@ jobs:
flags: rust
fail_ci_if_error: false
verbose: true
-
- test-on-windows:
- if: github.event.pull_request.draft == false
- runs-on: windows-latest-8-cores
- timeout-minutes: 60
- steps:
- - run: git config --global core.autocrlf false
- - uses: actions/checkout@v3
- - uses: arduino/setup-protoc@v1
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- - name: Install Rust toolchain
- uses: dtolnay/rust-toolchain@master
- with:
- toolchain: ${{ env.RUST_TOOLCHAIN }}
- components: llvm-tools-preview
- - name: Rust Cache
- uses: Swatinem/rust-cache@v2
- - name: Install Cargo Nextest
- uses: taiki-e/install-action@nextest
- - name: Install Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.10'
- - name: Install PyArrow Package
- run: pip install pyarrow
- - name: Install WSL distribution
- uses: Vampire/setup-wsl@v2
- with:
- distribution: Ubuntu-22.04
- - name: Running tests
- run: cargo nextest run -F pyo3_backend,dashboard
- env:
- RUST_BACKTRACE: 1
- CARGO_INCREMENTAL: 0
- GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
- GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
- GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
- GT_S3_REGION: ${{ secrets.S3_REGION }}
- UNITTEST_LOG_DIR: "__unittest_logs"
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
new file mode 100644
index 000000000000..7d2b6be636b7
--- /dev/null
+++ b/.github/workflows/nightly-ci.yml
@@ -0,0 +1,82 @@
+# Nightly CI: runs tests every night for our second tier platforms (Windows)
+
+on:
+ schedule:
+ - cron: '0 23 * * 1-5'
+ workflow_dispatch:
+
+name: Nightly CI
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
+env:
+ RUST_TOOLCHAIN: nightly-2023-08-07
+
+jobs:
+ sqlness:
+ name: Sqlness Test
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ windows-latest-8-cores ]
+ timeout-minutes: 60
+ steps:
+ - uses: actions/[email protected]
+ - uses: arduino/setup-protoc@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ env.RUST_TOOLCHAIN }}
+ - name: Rust Cache
+ uses: Swatinem/rust-cache@v2
+ - name: Run sqlness
+ run: cargo sqlness
+ - name: Upload sqlness logs
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: sqlness-logs
+ path: ${{ runner.temp }}/greptime-*.log
+ retention-days: 3
+
+ test-on-windows:
+ runs-on: windows-latest-8-cores
+ timeout-minutes: 60
+ steps:
+ - run: git config --global core.autocrlf false
+ - uses: actions/[email protected]
+ - uses: arduino/setup-protoc@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Install Rust toolchain
+ uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ env.RUST_TOOLCHAIN }}
+ components: llvm-tools-preview
+ - name: Rust Cache
+ uses: Swatinem/rust-cache@v2
+ - name: Install Cargo Nextest
+ uses: taiki-e/install-action@nextest
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+ - name: Install PyArrow Package
+ run: pip install pyarrow
+ - name: Install WSL distribution
+ uses: Vampire/setup-wsl@v2
+ with:
+ distribution: Ubuntu-22.04
+ - name: Running tests
+ run: cargo nextest run -F pyo3_backend,dashboard
+ env:
+ RUST_BACKTRACE: 1
+ CARGO_INCREMENTAL: 0
+ GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+ GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+ GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+ GT_S3_REGION: ${{ secrets.S3_REGION }}
+ UNITTEST_LOG_DIR: "__unittest_logs"
|
ci
|
run windows tests every night instead of every commit (#2577)
|
c2218f8be80e2159fcdc6f0cc239d2ab6491d8fe
|
2024-06-01 19:33:00
|
Ruihang Xia
|
build(deps): bump datafusion 20240528 (#4061)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 007f77744138..35fabee98b6c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -196,9 +196,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.83"
+version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "anymap"
@@ -224,8 +224,8 @@ dependencies = [
"datatypes",
"greptime-proto",
"paste",
- "prost 0.12.4",
- "snafu 0.8.2",
+ "prost 0.12.6",
+ "snafu 0.8.3",
"tonic-build 0.9.2",
]
@@ -423,8 +423,8 @@ dependencies = [
"bytes",
"futures",
"paste",
- "prost 0.12.4",
- "prost-types 0.12.4",
+ "prost 0.12.6",
+ "prost-types 0.12.6",
"tokio",
"tonic 0.11.0",
]
@@ -617,7 +617,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -639,7 +639,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -650,7 +650,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -715,7 +715,7 @@ dependencies = [
"digest",
"notify",
"sha1",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
"tokio",
]
@@ -728,7 +728,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -811,7 +811,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -883,7 +883,6 @@ dependencies = [
"arrow",
"chrono",
"clap 4.5.4",
- "client",
"common-base",
"common-telemetry",
"common-wal",
@@ -905,7 +904,7 @@ dependencies = [
"serde",
"store-api",
"tokio",
- "toml 0.8.12",
+ "toml 0.8.13",
"uuid",
]
@@ -948,7 +947,7 @@ dependencies = [
"regex",
"rustc-hash",
"shlex",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -1050,7 +1049,7 @@ dependencies = [
"proc-macro-crate 3.1.0",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
"syn_derive",
]
@@ -1157,9 +1156,9 @@ dependencies = [
[[package]]
name = "bytemuck"
-version = "1.15.0"
+version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"
+checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5"
[[package]]
name = "byteorder"
@@ -1226,7 +1225,7 @@ dependencies = [
"common-macro",
"common-meta",
"moka",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"substrait 0.8.1",
]
@@ -1277,7 +1276,7 @@ dependencies = [
"common-time",
"common-version",
"dashmap",
- "datafusion 37.0.0",
+ "datafusion 38.0.0",
"datatypes",
"futures",
"futures-util",
@@ -1293,7 +1292,7 @@ dependencies = [
"prometheus",
"serde_json",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
"store-api",
"table",
@@ -1311,9 +1310,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.97"
+version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4"
+checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f"
dependencies = [
"jobserver",
"libc",
@@ -1449,9 +1448,9 @@ dependencies = [
[[package]]
name = "clang-sys"
-version = "1.7.0"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1"
+checksum = "a483f3cbf7cec2e153d424d0e92329d816becc6421389bd494375c6065921b9b"
dependencies = [
"glob",
"libc",
@@ -1516,7 +1515,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -1558,12 +1557,12 @@ dependencies = [
"futures-util",
"lazy_static",
"moka",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"rand",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"substrait 0.17.1",
"substrait 0.8.1",
"tokio",
@@ -1638,7 +1637,7 @@ dependencies = [
"nu-ansi-term",
"plugins",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"rand",
"regex",
@@ -1649,7 +1648,7 @@ dependencies = [
"serde_json",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"substrait 0.8.1",
"table",
@@ -1657,7 +1656,7 @@ dependencies = [
"tempfile",
"tikv-jemallocator",
"tokio",
- "toml 0.8.12",
+ "toml 0.8.13",
"tracing-appender",
]
@@ -1705,8 +1704,8 @@ dependencies = [
"common-macro",
"paste",
"serde",
- "snafu 0.8.2",
- "toml 0.8.12",
+ "snafu 0.8.3",
+ "toml 0.8.13",
"zeroize",
]
@@ -1717,7 +1716,7 @@ dependencies = [
"chrono",
"common-error",
"common-macro",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
]
@@ -1737,11 +1736,11 @@ dependencies = [
"num_cpus",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sysinfo",
"temp-env",
"tempfile",
- "toml 0.8.12",
+ "toml 0.8.13",
]
[[package]]
@@ -1758,7 +1757,7 @@ dependencies = [
"common-recordbatch",
"common-runtime",
"common-test-util",
- "datafusion 37.0.0",
+ "datafusion 38.0.0",
"datatypes",
"derive_builder 0.12.0",
"futures",
@@ -1769,7 +1768,7 @@ dependencies = [
"paste",
"regex",
"serde",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"strum 0.25.0",
"tokio",
"tokio-util",
@@ -1786,14 +1785,14 @@ dependencies = [
"rust_decimal",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
]
[[package]]
name = "common-error"
version = "0.8.1"
dependencies = [
- "snafu 0.8.2",
+ "snafu 0.8.3",
"strum 0.25.0",
]
@@ -1808,7 +1807,7 @@ dependencies = [
"common-macro",
"common-query",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
]
@@ -1829,7 +1828,7 @@ dependencies = [
"common-telemetry",
"common-time",
"common-version",
- "datafusion 37.0.0",
+ "datafusion 38.0.0",
"datatypes",
"num",
"num-traits",
@@ -1839,7 +1838,7 @@ dependencies = [
"serde",
"serde_json",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"statrs",
"store-api",
"table",
@@ -1880,9 +1879,9 @@ dependencies = [
"datatypes",
"flatbuffers",
"lazy_static",
- "prost 0.12.4",
+ "prost 0.12.6",
"rand",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
"tonic 0.11.0",
"tower",
@@ -1901,7 +1900,7 @@ dependencies = [
"common-time",
"datatypes",
"paste",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"table",
]
@@ -1914,10 +1913,10 @@ dependencies = [
"datatypes",
"proc-macro2",
"quote",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"static_assertions",
"syn 1.0.109",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -1926,7 +1925,7 @@ version = "0.8.1"
dependencies = [
"common-error",
"common-macro",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tempfile",
"tikv-jemalloc-ctl",
"tikv-jemalloc-sys",
@@ -1955,7 +1954,7 @@ dependencies = [
"common-telemetry",
"common-time",
"common-wal",
- "datafusion-common 37.0.0",
+ "datafusion-common 38.0.0",
"datatypes",
"derive_builder 0.12.0",
"etcd-client",
@@ -1968,7 +1967,7 @@ dependencies = [
"lazy_static",
"moka",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"rand",
"regex",
"rskafka",
@@ -1976,7 +1975,7 @@ dependencies = [
"serde_json",
"serde_with",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"strum 0.25.0",
"table",
@@ -2010,7 +2009,7 @@ dependencies = [
"serde",
"serde_json",
"smallvec",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
"uuid",
]
@@ -2035,13 +2034,13 @@ dependencies = [
"common-macro",
"common-recordbatch",
"common-time",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"serde",
- "snafu 0.8.2",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "snafu 0.8.3",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"sqlparser_derive 0.1.1",
"statrs",
"tokio",
@@ -2055,14 +2054,14 @@ dependencies = [
"common-error",
"common-macro",
"common-telemetry",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
"datatypes",
"futures",
"pin-project",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
]
@@ -2078,7 +2077,7 @@ dependencies = [
"once_cell",
"paste",
"prometheus",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
"tokio-metrics",
"tokio-metrics-collector",
@@ -2101,7 +2100,7 @@ dependencies = [
"opentelemetry-otlp",
"opentelemetry-semantic-conventions",
"opentelemetry_sdk 0.21.2",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"prometheus",
"serde",
"serde_json",
@@ -2138,7 +2137,7 @@ dependencies = [
"rand",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
]
[[package]]
@@ -2164,9 +2163,9 @@ dependencies = [
"serde",
"serde_json",
"serde_with",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
- "toml 0.8.12",
+ "toml 0.8.13",
]
[[package]]
@@ -2344,9 +2343,9 @@ dependencies = [
[[package]]
name = "crc32fast"
-version = "1.4.0"
+version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
+checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
@@ -2404,9 +2403,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.12"
+version = "0.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95"
+checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
dependencies = [
"crossbeam-utils",
]
@@ -2441,9 +2440,9 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
-version = "0.8.19"
+version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
[[package]]
name = "crunchy"
@@ -2514,12 +2513,12 @@ dependencies = [
[[package]]
name = "darling"
-version = "0.20.8"
+version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391"
+checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1"
dependencies = [
- "darling_core 0.20.8",
- "darling_macro 0.20.8",
+ "darling_core 0.20.9",
+ "darling_macro 0.20.9",
]
[[package]]
@@ -2538,16 +2537,16 @@ dependencies = [
[[package]]
name = "darling_core"
-version = "0.20.8"
+version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f"
+checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
- "strsim 0.10.0",
- "syn 2.0.61",
+ "strsim 0.11.1",
+ "syn 2.0.66",
]
[[package]]
@@ -2563,13 +2562,13 @@ dependencies = [
[[package]]
name = "darling_macro"
-version = "0.20.8"
+version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
+checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178"
dependencies = [
- "darling_core 0.20.8",
+ "darling_core 0.20.9",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -2593,8 +2592,9 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "datafusion"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85069782056753459dc47e386219aa1fdac5b731f26c28abb8c0ffd4b7c5ab11"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2607,17 +2607,16 @@ dependencies = [
"bzip2",
"chrono",
"dashmap",
- "datafusion-common 37.0.0",
- "datafusion-common-runtime 37.0.0",
- "datafusion-execution 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-functions 37.0.0",
- "datafusion-functions-aggregate",
- "datafusion-functions-array 37.0.0",
- "datafusion-optimizer 37.0.0",
- "datafusion-physical-expr 37.0.0",
- "datafusion-physical-plan 37.0.0",
- "datafusion-sql 37.0.0",
+ "datafusion-common 37.1.0",
+ "datafusion-common-runtime 37.1.0",
+ "datafusion-execution 37.1.0",
+ "datafusion-expr 37.1.0",
+ "datafusion-functions 37.1.0",
+ "datafusion-functions-array 37.1.0",
+ "datafusion-optimizer 37.1.0",
+ "datafusion-physical-expr 37.1.0",
+ "datafusion-physical-plan 37.1.0",
+ "datafusion-sql 37.1.0",
"flate2",
"futures",
"glob",
@@ -2628,11 +2627,11 @@ dependencies = [
"log",
"num_cpus",
"object_store",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"parquet",
"pin-project-lite",
"rand",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser 0.44.0",
"tempfile",
"tokio",
"tokio-util",
@@ -2644,9 +2643,8 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85069782056753459dc47e386219aa1fdac5b731f26c28abb8c0ffd4b7c5ab11"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2659,16 +2657,18 @@ dependencies = [
"bzip2",
"chrono",
"dashmap",
- "datafusion-common 37.1.0",
- "datafusion-common-runtime 37.1.0",
- "datafusion-execution 37.1.0",
- "datafusion-expr 37.1.0",
- "datafusion-functions 37.1.0",
- "datafusion-functions-array 37.1.0",
- "datafusion-optimizer 37.1.0",
- "datafusion-physical-expr 37.1.0",
- "datafusion-physical-plan 37.1.0",
- "datafusion-sql 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-common-runtime 38.0.0",
+ "datafusion-execution 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions 38.0.0",
+ "datafusion-functions-aggregate",
+ "datafusion-functions-array 38.0.0",
+ "datafusion-optimizer 38.0.0",
+ "datafusion-physical-expr 38.0.0",
+ "datafusion-physical-expr-common",
+ "datafusion-physical-plan 38.0.0",
+ "datafusion-sql 38.0.0",
"flate2",
"futures",
"glob",
@@ -2679,11 +2679,11 @@ dependencies = [
"log",
"num_cpus",
"object_store",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"parquet",
"pin-project-lite",
"rand",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tempfile",
"tokio",
"tokio-util",
@@ -2695,8 +2695,9 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "309d9040751f6dc9e33c85dce6abb55a46ef7ea3644577dd014611c379447ef3"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2710,14 +2711,13 @@ dependencies = [
"num_cpus",
"object_store",
"parquet",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser 0.44.0",
]
[[package]]
name = "datafusion-common"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "309d9040751f6dc9e33c85dce6abb55a46ef7ea3644577dd014611c379447ef3"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2726,46 +2726,48 @@ dependencies = [
"arrow-schema",
"chrono",
"half 2.4.1",
+ "hashbrown 0.14.5",
"instant",
"libc",
"num_cpus",
"object_store",
"parquet",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "datafusion-common-runtime"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3e4a44d8ef1b1e85d32234e6012364c411c3787859bb3bba893b0332cb03dfd"
dependencies = [
"tokio",
]
[[package]]
name = "datafusion-common-runtime"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3e4a44d8ef1b1e85d32234e6012364c411c3787859bb3bba893b0332cb03dfd"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"tokio",
]
[[package]]
name = "datafusion-execution"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06a3a29ae36bcde07d179cc33b45656a8e7e4d023623e320e48dcf1200eeee95"
dependencies = [
"arrow",
"chrono",
"dashmap",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion-common 37.1.0",
+ "datafusion-expr 37.1.0",
"futures",
"hashbrown 0.14.5",
"log",
"object_store",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"rand",
"tempfile",
"url",
@@ -2773,20 +2775,19 @@ dependencies = [
[[package]]
name = "datafusion-execution"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06a3a29ae36bcde07d179cc33b45656a8e7e4d023623e320e48dcf1200eeee95"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
"chrono",
"dashmap",
- "datafusion-common 37.1.0",
- "datafusion-expr 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"futures",
"hashbrown 0.14.5",
"log",
"object_store",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"rand",
"tempfile",
"url",
@@ -2794,58 +2795,57 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a3542aa322029c2121a671ce08000d4b274171070df13f697b14169ccf4f628"
dependencies = [
"ahash 0.8.11",
"arrow",
"arrow-array",
"chrono",
- "datafusion-common 37.0.0",
+ "datafusion-common 37.1.0",
"paste",
- "serde_json",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser 0.44.0",
"strum 0.26.2",
"strum_macros 0.26.2",
]
[[package]]
name = "datafusion-expr"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a3542aa322029c2121a671ce08000d4b274171070df13f697b14169ccf4f628"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"ahash 0.8.11",
"arrow",
"arrow-array",
"chrono",
- "datafusion-common 37.1.0",
+ "datafusion-common 38.0.0",
"paste",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json",
+ "sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
"strum 0.26.2",
"strum_macros 0.26.2",
]
[[package]]
name = "datafusion-functions"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd221792c666eac174ecc09e606312844772acc12cbec61a420c2fca1ee70959"
dependencies = [
"arrow",
"base64 0.22.1",
"blake2",
"blake3",
"chrono",
- "datafusion-common 37.0.0",
- "datafusion-execution 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-physical-expr 37.0.0",
- "hashbrown 0.14.5",
+ "datafusion-common 37.1.0",
+ "datafusion-execution 37.1.0",
+ "datafusion-expr 37.1.0",
+ "datafusion-physical-expr 37.1.0",
"hex",
"itertools 0.12.1",
"log",
"md-5",
- "rand",
"regex",
"sha2",
"unicode-segmentation",
@@ -2854,23 +2854,24 @@ dependencies = [
[[package]]
name = "datafusion-functions"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd221792c666eac174ecc09e606312844772acc12cbec61a420c2fca1ee70959"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
"base64 0.22.1",
"blake2",
"blake3",
"chrono",
- "datafusion-common 37.1.0",
- "datafusion-execution 37.1.0",
- "datafusion-expr 37.1.0",
- "datafusion-physical-expr 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-execution 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-physical-expr 38.0.0",
+ "hashbrown 0.14.5",
"hex",
"itertools 0.12.1",
"log",
"md-5",
+ "rand",
"regex",
"sha2",
"unicode-segmentation",
@@ -2879,32 +2880,35 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
- "datafusion-common 37.0.0",
- "datafusion-execution 37.0.0",
- "datafusion-expr 37.0.0",
+ "arrow-schema",
+ "datafusion-common 38.0.0",
+ "datafusion-execution 38.0.0",
+ "datafusion-expr 38.0.0",
"datafusion-physical-expr-common",
"log",
"paste",
+ "sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "datafusion-functions-array"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e501801e84d9c6ef54caaebcda1b18a6196a24176c12fb70e969bc0572e03c55"
dependencies = [
"arrow",
"arrow-array",
"arrow-buffer",
"arrow-ord",
"arrow-schema",
- "datafusion-common 37.0.0",
- "datafusion-execution 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-functions 37.0.0",
+ "datafusion-common 37.1.0",
+ "datafusion-execution 37.1.0",
+ "datafusion-expr 37.1.0",
+ "datafusion-functions 37.1.0",
"itertools 0.12.1",
"log",
"paste",
@@ -2912,19 +2916,18 @@ dependencies = [
[[package]]
name = "datafusion-functions-array"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e501801e84d9c6ef54caaebcda1b18a6196a24176c12fb70e969bc0572e03c55"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
"arrow-array",
"arrow-buffer",
"arrow-ord",
"arrow-schema",
- "datafusion-common 37.1.0",
- "datafusion-execution 37.1.0",
- "datafusion-expr 37.1.0",
- "datafusion-functions 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-execution 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions 38.0.0",
"itertools 0.12.1",
"log",
"paste",
@@ -2932,15 +2935,16 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76bd7f5087817deb961764e8c973d243b54f8572db414a8f0a8f33a48f991e0a"
dependencies = [
"arrow",
"async-trait",
"chrono",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-physical-expr 37.0.0",
+ "datafusion-common 37.1.0",
+ "datafusion-expr 37.1.0",
+ "datafusion-physical-expr 37.1.0",
"hashbrown 0.14.5",
"itertools 0.12.1",
"log",
@@ -2949,17 +2953,18 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76bd7f5087817deb961764e8c973d243b54f8572db414a8f0a8f33a48f991e0a"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
"async-trait",
"chrono",
- "datafusion-common 37.1.0",
- "datafusion-expr 37.1.0",
- "datafusion-physical-expr 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions-aggregate",
+ "datafusion-physical-expr 38.0.0",
"hashbrown 0.14.5",
+ "indexmap 2.2.6",
"itertools 0.12.1",
"log",
"regex-syntax 0.8.3",
@@ -2967,8 +2972,9 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cabc0d9aaa0f5eb1b472112f16223c9ffd2fb04e58cbf65c0a331ee6e993f96"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2978,28 +2984,31 @@ dependencies = [
"arrow-schema",
"arrow-string",
"base64 0.22.1",
+ "blake2",
+ "blake3",
"chrono",
- "datafusion-common 37.0.0",
- "datafusion-execution 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-functions-aggregate",
- "datafusion-physical-expr-common",
+ "datafusion-common 37.1.0",
+ "datafusion-execution 37.1.0",
+ "datafusion-expr 37.1.0",
"half 2.4.1",
"hashbrown 0.14.5",
"hex",
"indexmap 2.2.6",
"itertools 0.12.1",
"log",
+ "md-5",
"paste",
"petgraph",
+ "rand",
"regex",
+ "sha2",
+ "unicode-segmentation",
]
[[package]]
name = "datafusion-physical-expr"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cabc0d9aaa0f5eb1b472112f16223c9ffd2fb04e58cbf65c0a331ee6e993f96"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -3009,57 +3018,52 @@ dependencies = [
"arrow-schema",
"arrow-string",
"base64 0.22.1",
- "blake2",
- "blake3",
"chrono",
- "datafusion-common 37.1.0",
- "datafusion-execution 37.1.0",
- "datafusion-expr 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-execution 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions-aggregate",
+ "datafusion-physical-expr-common",
"half 2.4.1",
"hashbrown 0.14.5",
"hex",
"indexmap 2.2.6",
"itertools 0.12.1",
"log",
- "md-5",
"paste",
"petgraph",
- "rand",
"regex",
- "sha2",
- "unicode-segmentation",
]
[[package]]
name = "datafusion-physical-expr-common"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "rand",
]
[[package]]
name = "datafusion-physical-plan"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17c0523e9c8880f2492a88bbd857dde02bed1ed23f3e9211a89d3d7ec3b44af9"
dependencies = [
"ahash 0.8.11",
"arrow",
"arrow-array",
"arrow-buffer",
- "arrow-ord",
"arrow-schema",
"async-trait",
"chrono",
- "datafusion-common 37.0.0",
- "datafusion-common-runtime 37.0.0",
- "datafusion-execution 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-functions-aggregate",
- "datafusion-physical-expr 37.0.0",
- "datafusion-physical-expr-common",
+ "datafusion-common 37.1.0",
+ "datafusion-common-runtime 37.1.0",
+ "datafusion-execution 37.1.0",
+ "datafusion-expr 37.1.0",
+ "datafusion-physical-expr 37.1.0",
"futures",
"half 2.4.1",
"hashbrown 0.14.5",
@@ -3067,7 +3071,7 @@ dependencies = [
"itertools 0.12.1",
"log",
"once_cell",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"pin-project-lite",
"rand",
"tokio",
@@ -3075,22 +3079,24 @@ dependencies = [
[[package]]
name = "datafusion-physical-plan"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17c0523e9c8880f2492a88bbd857dde02bed1ed23f3e9211a89d3d7ec3b44af9"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"ahash 0.8.11",
"arrow",
"arrow-array",
"arrow-buffer",
+ "arrow-ord",
"arrow-schema",
"async-trait",
"chrono",
- "datafusion-common 37.1.0",
- "datafusion-common-runtime 37.1.0",
- "datafusion-execution 37.1.0",
- "datafusion-expr 37.1.0",
- "datafusion-physical-expr 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-common-runtime 38.0.0",
+ "datafusion-execution 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions-aggregate",
+ "datafusion-physical-expr 38.0.0",
+ "datafusion-physical-expr-common",
"futures",
"half 2.4.1",
"hashbrown 0.14.5",
@@ -3098,7 +3104,7 @@ dependencies = [
"itertools 0.12.1",
"log",
"once_cell",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"pin-project-lite",
"rand",
"tokio",
@@ -3106,48 +3112,49 @@ dependencies = [
[[package]]
name = "datafusion-sql"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "37.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49eb54b42227136f6287573f2434b1de249fe1b8e6cd6cc73a634e4a3ec29356"
dependencies = [
"arrow",
"arrow-array",
"arrow-schema",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion-common 37.1.0",
+ "datafusion-expr 37.1.0",
"log",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser 0.44.0",
"strum 0.26.2",
]
[[package]]
name = "datafusion-sql"
-version = "37.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49eb54b42227136f6287573f2434b1de249fe1b8e6cd6cc73a634e4a3ec29356"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"arrow",
"arrow-array",
"arrow-schema",
- "datafusion-common 37.1.0",
- "datafusion-expr 37.1.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"log",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex",
+ "sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
"strum 0.26.2",
]
[[package]]
name = "datafusion-substrait"
-version = "37.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
+version = "38.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=08e19f4956d32164be6fc66eb5a4c080eb0023d1#08e19f4956d32164be6fc66eb5a4c080eb0023d1"
dependencies = [
"async-recursion",
"chrono",
- "datafusion 37.0.0",
+ "datafusion 38.0.0",
"itertools 0.12.1",
"object_store",
- "prost 0.12.4",
- "prost-types 0.12.4",
- "substrait 0.30.0",
+ "prost 0.12.6",
+ "prost-types 0.12.6",
+ "substrait 0.34.0",
]
[[package]]
@@ -3178,9 +3185,9 @@ dependencies = [
"common-version",
"common-wal",
"dashmap",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"file-engine",
"futures",
@@ -3193,17 +3200,17 @@ dependencies = [
"mito2",
"object-store",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"reqwest",
"serde",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"table",
"tokio",
- "toml 0.8.12",
+ "toml 0.8.13",
"tonic 0.11.0",
]
@@ -3220,7 +3227,7 @@ dependencies = [
"common-macro",
"common-telemetry",
"common-time",
- "datafusion-common 37.0.0",
+ "datafusion-common 38.0.0",
"enum_dispatch",
"num",
"num-traits",
@@ -3228,7 +3235,7 @@ dependencies = [
"paste",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
]
[[package]]
@@ -3291,7 +3298,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -3302,7 +3309,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -3506,9 +3513,9 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]]
name = "either"
-version = "1.11.0"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
+checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b"
[[package]]
name = "ena"
@@ -3540,7 +3547,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -3552,7 +3559,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -3563,11 +3570,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "erased-serde"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3"
+checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d"
dependencies = [
"serde",
+ "typeid",
]
[[package]]
@@ -3596,7 +3604,7 @@ version = "0.12.4"
source = "git+https://github.com/MichaelScofield/etcd-client.git?rev=4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b#4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b"
dependencies = [
"http 0.2.12",
- "prost 0.12.4",
+ "prost 0.12.6",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -3706,14 +3714,14 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
- "datafusion 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"futures",
"object-store",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"table",
"tokio",
@@ -3743,12 +3751,6 @@ dependencies = [
"winapi",
]
-[[package]]
-name = "finl_unicode"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6"
-
[[package]]
name = "fixedbitset"
version = "0.4.2"
@@ -3811,8 +3813,8 @@ dependencies = [
"common-runtime",
"common-telemetry",
"common-time",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"enum-as-inner",
"enum_dispatch",
@@ -3824,14 +3826,14 @@ dependencies = [
"nom",
"num-traits",
"pretty_assertions",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"serde",
"serde_json",
"servers",
"session",
"smallvec",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"strum 0.25.0",
"substrait 0.8.1",
@@ -3909,21 +3911,21 @@ dependencies = [
"operator",
"partition",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"raft-engine",
"script",
"serde",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"strfmt",
"table",
"tokio",
- "toml 0.8.12",
+ "toml 0.8.13",
"tonic 0.11.0",
"tower",
"uuid",
@@ -3954,7 +3956,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e"
dependencies = [
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -3966,7 +3968,7 @@ dependencies = [
"frunk_core",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -3978,7 +3980,7 @@ dependencies = [
"frunk_core",
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -4079,7 +4081,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -4190,7 +4192,7 @@ name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ae26136accd82fbdf8be540cd502f2e94951077e#ae26136accd82fbdf8be540cd502f2e94951077e"
dependencies = [
- "prost 0.12.4",
+ "prost 0.12.6",
"serde",
"serde_json",
"strum 0.25.0",
@@ -4484,7 +4486,7 @@ dependencies = [
"os_info",
"serde",
"serde_derive",
- "toml 0.8.12",
+ "toml 0.8.13",
"uuid",
]
@@ -4545,7 +4547,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -4560,7 +4562,7 @@ dependencies = [
"rust-sitter",
"rust-sitter-tool",
"slotmap",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -4579,7 +4581,7 @@ dependencies = [
"serde",
"serde_json",
"slotmap",
- "syn 2.0.61",
+ "syn 2.0.66",
"webbrowser",
]
@@ -4593,7 +4595,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -4769,11 +4771,11 @@ dependencies = [
"greptime-proto",
"mockall",
"pin-project",
- "prost 0.12.4",
+ "prost 0.12.6",
"rand",
"regex",
"regex-automata 0.4.6",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tempfile",
"tokio",
"tokio-util",
@@ -4881,9 +4883,9 @@ dependencies = [
[[package]]
name = "instant"
-version = "0.1.12"
+version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
"cfg-if",
"js-sys",
@@ -5205,9 +5207,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.154"
+version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
+checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "libfuzzer-sys"
@@ -5281,9 +5283,9 @@ dependencies = [
[[package]]
name = "libz-sys"
-version = "1.1.16"
+version = "1.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9"
+checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e"
dependencies = [
"cc",
"libc",
@@ -5299,9 +5301,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
[[package]]
name = "linux-raw-sys"
-version = "0.4.13"
+version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
[[package]]
name = "lock_api"
@@ -5348,7 +5350,7 @@ dependencies = [
"rskafka",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"tokio",
"uuid",
@@ -5478,9 +5480,9 @@ dependencies = [
[[package]]
name = "mac_address"
-version = "1.1.6"
+version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aa12182b93606fff55b70a5cfe6130eaf7407c2ea4f2c2bcc8b113b67c9928f"
+checksum = "8836fae9d0d4be2c8b4efcdd79e828a2faa058a90d005abf42f91cac5493a08e"
dependencies = [
"nix 0.28.0",
"winapi",
@@ -5633,7 +5635,7 @@ dependencies = [
"meta-srv",
"rand",
"serde",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -5677,21 +5679,21 @@ dependencies = [
"itertools 0.10.5",
"lazy_static",
"once_cell",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"rand",
"regex",
"serde",
"serde_json",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"table",
"tokio",
"tokio-stream",
- "toml 0.8.12",
+ "toml 0.8.13",
"tonic 0.11.0",
"tower",
"tracing",
@@ -5707,7 +5709,7 @@ source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80b72716dcd
dependencies = [
"anymap",
"once_cell",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
]
[[package]]
@@ -5733,7 +5735,7 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
- "datafusion 37.0.0",
+ "datafusion 38.0.0",
"datatypes",
"itertools 0.10.5",
"lazy_static",
@@ -5742,7 +5744,7 @@ dependencies = [
"object-store",
"prometheus",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"tokio",
]
@@ -5771,9 +5773,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
+checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae"
dependencies = [
"adler",
]
@@ -5827,9 +5829,9 @@ dependencies = [
"crc32fast",
"criterion",
"crossbeam-utils",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"futures",
"humantime-serde",
@@ -5844,7 +5846,7 @@ dependencies = [
"paste",
"pin-project",
"prometheus",
- "prost 0.12.4",
+ "prost 0.12.6",
"puffin",
"rand",
"regex",
@@ -5852,14 +5854,14 @@ dependencies = [
"serde_json",
"serde_with",
"smallvec",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"strum 0.25.0",
"table",
"tokio",
"tokio-stream",
"tokio-util",
- "toml 0.8.12",
+ "toml 0.8.13",
"uuid",
]
@@ -5904,7 +5906,7 @@ dependencies = [
"event-listener 5.3.0",
"futures-util",
"once_cell",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"quanta",
"rustc_version",
"smallvec",
@@ -5947,14 +5949,14 @@ version = "0.30.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f"
dependencies = [
- "darling 0.20.8",
+ "darling 0.20.9",
"heck 0.4.1",
"num-bigint",
"proc-macro-crate 1.3.1",
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
"termcolor",
"thiserror",
]
@@ -5965,14 +5967,14 @@ version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afe0450cc9344afff34915f8328600ab5ae19260802a334d0f72d2d5bdda3bfe"
dependencies = [
- "darling 0.20.8",
+ "darling 0.20.9",
"heck 0.4.1",
"num-bigint",
"proc-macro-crate 3.1.0",
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
"termcolor",
"thiserror",
]
@@ -6055,9 +6057,9 @@ dependencies = [
[[package]]
name = "mysql_common"
-version = "0.32.2"
+version = "0.32.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ccdc1fe2bb3ef97e07ba4397327ed45509a1e2e499e2f8265243879cbc7313c"
+checksum = "d1e52cf194ab414202ead9dfda216d2a9ec59cc97ac024ba499ca686d82f040d"
dependencies = [
"base64 0.21.7",
"bigdecimal",
@@ -6301,7 +6303,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -6458,7 +6460,7 @@ dependencies = [
"futures",
"humantime",
"itertools 0.12.1",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"percent-encoding",
"snafu 0.7.5",
"tokio",
@@ -6531,7 +6533,7 @@ dependencies = [
"async-trait",
"byteorder",
"chrono",
- "mysql_common 0.32.2",
+ "mysql_common 0.32.3",
"nom",
"pin-project-lite",
"tokio",
@@ -6614,7 +6616,7 @@ checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4"
dependencies = [
"opentelemetry 0.22.0",
"opentelemetry_sdk 0.22.1",
- "prost 0.12.4",
+ "prost 0.12.6",
"tonic 0.11.0",
]
@@ -6692,9 +6694,9 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"file-engine",
"futures",
@@ -6713,9 +6715,9 @@ dependencies = [
"serde_json",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"substrait 0.8.1",
"table",
@@ -6878,9 +6880,9 @@ dependencies = [
[[package]]
name = "parking_lot"
-version = "0.12.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
+checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
dependencies = [
"lock_api",
"parking_lot_core 0.9.10",
@@ -6970,15 +6972,15 @@ dependencies = [
"common-macro",
"common-meta",
"common-query",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"itertools 0.10.5",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
"table",
]
@@ -7082,7 +7084,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -7197,7 +7199,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -7280,9 +7282,9 @@ checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
[[package]]
name = "plotters"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
+checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
dependencies = [
"num-traits",
"plotters-backend",
@@ -7293,15 +7295,15 @@ dependencies = [
[[package]]
name = "plotters-backend"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609"
+checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
[[package]]
name = "plotters-svg"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab"
+checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
dependencies = [
"plotters-backend",
]
@@ -7315,7 +7317,7 @@ dependencies = [
"datanode",
"frontend",
"meta-srv",
- "snafu 0.8.2",
+ "snafu 0.8.3",
]
[[package]]
@@ -7386,10 +7388,10 @@ dependencies = [
"log",
"nix 0.26.4",
"once_cell",
- "parking_lot 0.12.2",
- "prost 0.12.4",
- "prost-build 0.12.4",
- "prost-derive 0.12.5",
+ "parking_lot 0.12.3",
+ "prost 0.12.6",
+ "prost-build 0.12.6",
+ "prost-derive 0.12.6",
"protobuf",
"sha2",
"smallvec",
@@ -7477,7 +7479,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
dependencies = [
"proc-macro2",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -7525,9 +7527,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.82"
+version = "1.0.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
+checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6"
dependencies = [
"unicode-ident",
]
@@ -7566,7 +7568,7 @@ dependencies = [
"lazy_static",
"libc",
"memchr",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"procfs",
"protobuf",
"thiserror",
@@ -7595,18 +7597,18 @@ dependencies = [
"common-macro",
"common-recordbatch",
"common-telemetry",
- "datafusion 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-expr 38.0.0",
"datatypes",
"futures",
"greptime-proto",
"lazy_static",
"prometheus",
"promql-parser",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
]
@@ -7635,12 +7637,12 @@ dependencies = [
[[package]]
name = "prost"
-version = "0.12.4"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922"
+checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29"
dependencies = [
"bytes",
- "prost-derive 0.12.5",
+ "prost-derive 0.12.6",
]
[[package]]
@@ -7667,9 +7669,9 @@ dependencies = [
[[package]]
name = "prost-build"
-version = "0.12.4"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1"
+checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4"
dependencies = [
"bytes",
"heck 0.5.0",
@@ -7679,10 +7681,10 @@ dependencies = [
"once_cell",
"petgraph",
"prettyplease 0.2.20",
- "prost 0.12.4",
- "prost-types 0.12.4",
+ "prost 0.12.6",
+ "prost-types 0.12.6",
"regex",
- "syn 2.0.61",
+ "syn 2.0.66",
"tempfile",
]
@@ -7701,15 +7703,15 @@ dependencies = [
[[package]]
name = "prost-derive"
-version = "0.12.5"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9554e3ab233f0a932403704f1a1d08c30d5ccd931adfdfa1e8b5a19b52c1d55a"
+checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1"
dependencies = [
"anyhow",
"itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -7723,11 +7725,11 @@ dependencies = [
[[package]]
name = "prost-types"
-version = "0.12.4"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe"
+checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0"
dependencies = [
- "prost 0.12.4",
+ "prost 0.12.6",
]
[[package]]
@@ -7803,7 +7805,7 @@ dependencies = [
"pin-project",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"tokio",
"tokio-util",
]
@@ -7833,7 +7835,7 @@ dependencies = [
"indoc",
"libc",
"memoffset 0.9.1",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
@@ -7870,7 +7872,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -7883,7 +7885,7 @@ dependencies = [
"proc-macro2",
"pyo3-build-config",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -7930,13 +7932,13 @@ dependencies = [
"common-runtime",
"common-telemetry",
"common-time",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-functions 37.0.0",
- "datafusion-optimizer 37.0.0",
- "datafusion-physical-expr 37.0.0",
- "datafusion-sql 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions 38.0.0",
+ "datafusion-optimizer 38.0.0",
+ "datafusion-physical-expr 38.0.0",
+ "datafusion-sql 38.0.0",
"datatypes",
"format_num",
"futures",
@@ -7955,12 +7957,13 @@ dependencies = [
"prometheus",
"promql",
"promql-parser",
- "prost 0.12.4",
+ "prost 0.12.6",
"rand",
"regex",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"statrs",
"stats-cli",
"store-api",
@@ -8036,7 +8039,7 @@ dependencies = [
"nix 0.26.4",
"num-derive",
"num-traits",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"prometheus",
"prometheus-static-metric",
"protobuf",
@@ -8186,7 +8189,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -8245,9 +8248,9 @@ dependencies = [
[[package]]
name = "regress"
-version = "0.8.0"
+version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f5f39ba4513916c1b2657b72af6ec671f091cd637992f58d0ede5cae4e5dea0"
+checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479"
dependencies = [
"hashbrown 0.14.5",
"memchr",
@@ -8507,7 +8510,7 @@ dependencies = [
"futures",
"integer-encoding 4.0.0",
"lz4",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"pin-project-lite",
"rand",
"snap",
@@ -8575,7 +8578,7 @@ dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
- "syn 2.0.61",
+ "syn 2.0.66",
"walkdir",
]
@@ -8742,7 +8745,7 @@ dependencies = [
"log",
"ring 0.17.8",
"rustls-pki-types",
- "rustls-webpki 0.102.3",
+ "rustls-webpki 0.102.4",
"subtle",
"zeroize",
]
@@ -8797,9 +8800,9 @@ dependencies = [
[[package]]
name = "rustls-webpki"
-version = "0.102.3"
+version = "0.102.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf"
+checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e"
dependencies = [
"ring 0.17.8",
"rustls-pki-types",
@@ -8988,7 +8991,7 @@ dependencies = [
"num_enum",
"once_cell",
"page_size",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"paste",
"puruspe",
"rand",
@@ -9054,7 +9057,7 @@ dependencies = [
"num_enum",
"once_cell",
"optional",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"paste",
"rand",
"result-like",
@@ -9091,9 +9094,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
[[package]]
name = "rustyline"
@@ -9244,9 +9247,9 @@ dependencies = [
[[package]]
name = "schemars"
-version = "0.8.19"
+version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc6e7ed6919cb46507fb01ff1654309219f62b4d603822501b0b80d42f6f21ef"
+checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92"
dependencies = [
"dyn-clone",
"indexmap 1.9.3",
@@ -9257,14 +9260,14 @@ dependencies = [
[[package]]
name = "schemars_derive"
-version = "0.8.19"
+version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "185f2b7aa7e02d418e453790dde16890256bbd2bcd04b7dc5348811052b53f49"
+checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -9295,11 +9298,11 @@ dependencies = [
"console",
"criterion",
"crossbeam-utils",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-functions 37.0.0",
- "datafusion-physical-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-functions 38.0.0",
+ "datafusion-physical-expr 38.0.0",
"datatypes",
"futures",
"lazy_static",
@@ -9322,7 +9325,7 @@ dependencies = [
"serde",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
"table",
"tokio",
@@ -9365,7 +9368,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -9396,6 +9399,9 @@ name = "semver"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
+dependencies = [
+ "serde",
+]
[[package]]
name = "seq-macro"
@@ -9405,33 +9411,33 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
-version = "1.0.201"
+version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c"
+checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.201"
+version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865"
+checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
name = "serde_derive_internals"
-version = "0.29.0"
+version = "0.29.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "330f01ce65a3a5fe59a60c82f3c9a024b573b8a6e875bd233fe5f934e71d54e3"
+checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -9464,14 +9470,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
name = "serde_spanned"
-version = "0.6.5"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
+checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0"
dependencies = [
"serde",
]
@@ -9485,7 +9491,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -9524,10 +9530,10 @@ version = "3.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2"
dependencies = [
- "darling 0.20.8",
+ "darling 0.20.9",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -9580,8 +9586,8 @@ dependencies = [
"common-version",
"criterion",
"dashmap",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
"datatypes",
"derive_builder 0.12.0",
"futures",
@@ -9603,7 +9609,7 @@ dependencies = [
"openmetrics-parser",
"opensrv-mysql",
"opentelemetry-proto 0.5.0",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"permutation",
"pgwire",
"pin-project",
@@ -9611,7 +9617,7 @@ dependencies = [
"pprof",
"prometheus",
"promql-parser",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"rand",
"regex",
@@ -9625,7 +9631,7 @@ dependencies = [
"serde",
"serde_json",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"snap",
"sql",
"strum 0.25.0",
@@ -9659,7 +9665,7 @@ dependencies = [
"common-telemetry",
"common-time",
"derive_builder 0.12.0",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
]
@@ -9823,11 +9829,11 @@ dependencies = [
[[package]]
name = "snafu"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75976f4748ab44f6e5332102be424e7c2dc18daeaf7e725f2040c3ebb133512e"
+checksum = "418b8136fec49956eba89be7da2847ec1909df92a9ae4178b5ff0ff092c8d95e"
dependencies = [
- "snafu-derive 0.8.2",
+ "snafu-derive 0.8.3",
]
[[package]]
@@ -9844,14 +9850,14 @@ dependencies = [
[[package]]
name = "snafu-derive"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4b19911debfb8c2fb1107bc6cb2d61868aaf53a988449213959bb1b5b1ed95f"
+checksum = "1a4812a669da00d17d8266a0439eddcacbc88b17f732f927e52eeb9d196f7fb5"
dependencies = [
- "heck 0.4.1",
+ "heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -9938,18 +9944,18 @@ dependencies = [
"common-macro",
"common-query",
"common-time",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-physical-expr 37.0.0",
- "datafusion-sql 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-physical-expr 38.0.0",
+ "datafusion-sql 38.0.0",
"datatypes",
"hex",
"itertools 0.10.5",
"lazy_static",
"regex",
- "snafu 0.8.2",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "snafu 0.8.3",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"sqlparser_derive 0.1.1",
"table",
]
@@ -10011,14 +10017,24 @@ dependencies = [
[[package]]
name = "sqlparser"
-version = "0.44.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d#e4e496b8d62416ad50ce70a1b460c7313610cf5d"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7bbffee862a796d67959a89859d6b1046bb5016d63e23835ad0da182777bbe0"
+dependencies = [
+ "log",
+ "sqlparser_derive 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "sqlparser"
+version = "0.45.0"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7#54a267ac89c09b11c0c88934690530807185d3e7"
dependencies = [
"lazy_static",
"log",
"regex",
- "sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sqlparser_derive 0.2.2 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sqlparser_derive 0.2.2 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
]
[[package]]
@@ -10040,17 +10056,17 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
name = "sqlparser_derive"
version = "0.2.2"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d#e4e496b8d62416ad50ce70a1b460c7313610cf5d"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7#54a267ac89c09b11c0c88934690530807185d3e7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -10211,14 +10227,14 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-wal",
- "datafusion-expr 37.0.0",
- "datafusion-physical-plan 37.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-physical-plan 38.0.0",
"datatypes",
"derive_builder 0.12.0",
"futures",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"strum 0.25.0",
"tokio",
]
@@ -10258,20 +10274,20 @@ checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b"
dependencies = [
"new_debug_unreachable",
"once_cell",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"phf_shared 0.10.0",
"precomputed-hash",
]
[[package]]
name = "stringprep"
-version = "0.1.4"
+version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6"
+checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1"
dependencies = [
- "finl_unicode",
"unicode-bidi",
"unicode-normalization",
+ "unicode-properties",
]
[[package]]
@@ -10339,7 +10355,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -10352,7 +10368,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -10374,14 +10390,14 @@ dependencies = [
"common-error",
"common-macro",
"common-telemetry",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
"datafusion-substrait",
"datatypes",
"promql",
- "prost 0.12.4",
- "snafu 0.8.2",
+ "prost 0.12.6",
+ "snafu 0.8.3",
"substrait 0.17.1",
"tokio",
]
@@ -10395,37 +10411,37 @@ dependencies = [
"git2",
"heck 0.4.1",
"prettyplease 0.2.20",
- "prost 0.12.4",
- "prost-build 0.12.4",
- "prost-types 0.12.4",
+ "prost 0.12.6",
+ "prost-build 0.12.6",
+ "prost-types 0.12.6",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.61",
+ "syn 2.0.66",
"typify 0.0.14",
"walkdir",
]
[[package]]
name = "substrait"
-version = "0.30.0"
+version = "0.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba959c71b2a1a341a94e1f362615d7e5f1a4de9d25d82fceea8160f79f1e1dfb"
+checksum = "2c7ccf682a18309d9039bc16e14d9c7ea3a461e65dfc46e2b611b401b945ef2d"
dependencies = [
"heck 0.5.0",
"prettyplease 0.2.20",
- "prost 0.12.4",
- "prost-build 0.12.4",
- "prost-types 0.12.4",
+ "prost 0.12.6",
+ "prost-build 0.12.6",
+ "prost-types 0.12.6",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.61",
- "typify 0.0.16",
+ "syn 2.0.66",
+ "typify 0.1.0",
"walkdir",
]
@@ -10471,9 +10487,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.61"
+version = "2.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9"
+checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5"
dependencies = [
"proc-macro2",
"quote",
@@ -10508,7 +10524,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -10570,10 +10586,10 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
- "datafusion 37.0.0",
- "datafusion-common 37.0.0",
- "datafusion-expr 37.0.0",
- "datafusion-physical-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-common 38.0.0",
+ "datafusion-expr 38.0.0",
+ "datafusion-physical-expr 38.0.0",
"datatypes",
"derive_builder 0.12.0",
"futures",
@@ -10584,7 +10600,7 @@ dependencies = [
"paste",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"store-api",
"tokio",
"tokio-util",
@@ -10614,7 +10630,7 @@ version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96374855068f47402c3121c6eed88d29cb1de8f3ab27090e273e420bdabcf050"
dependencies = [
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
]
[[package]]
@@ -10689,9 +10705,9 @@ dependencies = [
"reqwest",
"serde",
"serde_json",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
+ "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"sqlx",
"tinytemplate",
"tokio",
@@ -10726,8 +10742,8 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-wal",
- "datafusion 37.0.0",
- "datafusion-expr 37.0.0",
+ "datafusion 38.0.0",
+ "datafusion-expr 38.0.0",
"datanode",
"datatypes",
"dotenv",
@@ -10745,7 +10761,7 @@ dependencies = [
"operator",
"partition",
"paste",
- "prost 0.12.4",
+ "prost 0.12.6",
"query",
"rand",
"rstest",
@@ -10754,7 +10770,7 @@ dependencies = [
"serde_json",
"servers",
"session",
- "snafu 0.8.2",
+ "snafu 0.8.3",
"sql",
"sqlx",
"store-api",
@@ -10794,22 +10810,22 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
[[package]]
name = "thiserror"
-version = "1.0.60"
+version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18"
+checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.60"
+version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524"
+checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -10970,7 +10986,7 @@ dependencies = [
"libc",
"mio",
"num_cpus",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.5.7",
@@ -10997,7 +11013,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -11019,7 +11035,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d767da47381602cc481653456823b3ebb600e83d5dd4e0293da9b5566c6c00f0"
dependencies = [
"lazy_static",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"prometheus",
"tokio",
"tokio-metrics",
@@ -11038,7 +11054,7 @@ dependencies = [
"futures-channel",
"futures-util",
"log",
- "parking_lot 0.12.2",
+ "parking_lot 0.12.3",
"percent-encoding",
"phf",
"pin-project-lite",
@@ -11148,21 +11164,21 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.8.12"
+version = "0.8.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
+checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
- "toml_edit 0.22.12",
+ "toml_edit 0.22.13",
]
[[package]]
name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf"
dependencies = [
"serde",
]
@@ -11191,9 +11207,9 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.22.12"
+version = "0.22.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
+checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c"
dependencies = [
"indexmap 2.2.6",
"serde",
@@ -11249,7 +11265,7 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
- "prost 0.12.4",
+ "prost 0.12.6",
"rustls-pemfile 2.1.2",
"rustls-pki-types",
"tokio",
@@ -11283,9 +11299,9 @@ checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
dependencies = [
"prettyplease 0.2.20",
"proc-macro2",
- "prost-build 0.12.4",
+ "prost-build 0.12.6",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -11296,9 +11312,9 @@ checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2"
dependencies = [
"prettyplease 0.2.20",
"proc-macro2",
- "prost-build 0.12.4",
+ "prost-build 0.12.6",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -11307,8 +11323,8 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7"
dependencies = [
- "prost 0.12.4",
- "prost-types 0.12.4",
+ "prost 0.12.6",
+ "prost-types 0.12.6",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -11409,7 +11425,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -11620,6 +11636,12 @@ dependencies = [
"static_assertions",
]
+[[package]]
+name = "typeid"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf"
+
[[package]]
name = "typenum"
version = "1.17.0"
@@ -11647,7 +11669,7 @@ checksum = "ac73887f47b9312552aa90ef477927ff014d63d1920ca8037c6c1951eab64bb1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -11662,12 +11684,12 @@ dependencies = [
[[package]]
name = "typify"
-version = "0.0.16"
+version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c61e9db210bbff218e6535c664b37ec47da449169b98e7866d0580d0db75529"
+checksum = "adb6beec125971dda80a086f90b4a70f60f222990ce4d63ad0fc140492f53444"
dependencies = [
- "typify-impl 0.0.16",
- "typify-macro 0.0.16",
+ "typify-impl 0.1.0",
+ "typify-macro 0.1.0",
]
[[package]]
@@ -11683,25 +11705,27 @@ dependencies = [
"regress 0.7.1",
"schemars",
"serde_json",
- "syn 2.0.61",
+ "syn 2.0.66",
"thiserror",
"unicode-ident",
]
[[package]]
name = "typify-impl"
-version = "0.0.16"
+version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95e32f38493804f88e2dc7a5412eccd872ea5452b4db9b0a77de4df180f2a87e"
+checksum = "93bbb24e990654aff858d80fee8114f4322f7d7a1b1ecb45129e2fcb0d0ad5ae"
dependencies = [
- "heck 0.4.1",
+ "heck 0.5.0",
"log",
"proc-macro2",
"quote",
- "regress 0.8.0",
+ "regress 0.9.1",
"schemars",
+ "semver",
+ "serde",
"serde_json",
- "syn 2.0.61",
+ "syn 2.0.66",
"thiserror",
"unicode-ident",
]
@@ -11718,24 +11742,25 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.61",
+ "syn 2.0.66",
"typify-impl 0.0.14",
]
[[package]]
name = "typify-macro"
-version = "0.0.16"
+version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc09508b72f63d521d68e42c7f172c7416d67986df44b3c7d1f7f9963948ed32"
+checksum = "f8e6491896e955692d68361c68db2b263e3bec317ec0b684e0e2fa882fb6e31e"
dependencies = [
"proc-macro2",
"quote",
"schemars",
+ "semver",
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.61",
- "typify-impl 0.0.16",
+ "syn 2.0.66",
+ "typify-impl 0.1.0",
]
[[package]]
@@ -11911,6 +11936,12 @@ dependencies = [
"tinyvec",
]
+[[package]]
+name = "unicode-properties"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291"
+
[[package]]
name = "unicode-segmentation"
version = "1.11.0"
@@ -12017,7 +12048,7 @@ checksum = "9881bea7cbe687e36c9ab3b778c36cd0487402e270304e8b1296d5085303c1a2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
@@ -12132,7 +12163,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
"wasm-bindgen-shared",
]
@@ -12166,7 +12197,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -12296,9 +12327,9 @@ dependencies = [
[[package]]
name = "wide"
-version = "0.7.17"
+version = "0.7.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f0e39d2c603fdc0504b12b458cf1f34e0b937ed2f4f2dc20796e3e86f34e11f"
+checksum = "cd8dc749a1b03f3c255a3064a4f5c0ee5ed09b7c6bc6d4525d31f779cd74d7fc"
dependencies = [
"bytemuck",
"safe_arch",
@@ -12720,14 +12751,14 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
name = "zeroize"
-version = "1.7.0"
+version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d"
+checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
dependencies = [
"zeroize_derive",
]
@@ -12740,7 +12771,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.61",
+ "syn 2.0.66",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 734d2438b0b1..01e17e5a8c92 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -104,15 +104,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
derive_builder = "0.12"
dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -162,7 +162,7 @@ smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml
index dc5bf8ba9727..1ce91acaf786 100644
--- a/benchmarks/Cargo.toml
+++ b/benchmarks/Cargo.toml
@@ -12,7 +12,6 @@ api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
-client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
diff --git a/src/common/query/src/logical_plan/udaf.rs b/src/common/query/src/logical_plan/udaf.rs
index f2abcb673be8..298dbf1874eb 100644
--- a/src/common/query/src/logical_plan/udaf.rs
+++ b/src/common/query/src/logical_plan/udaf.rs
@@ -22,7 +22,7 @@ use std::sync::Arc;
use datafusion::arrow::datatypes::Field;
use datafusion_common::Result;
-use datafusion_expr::function::AccumulatorArgs;
+use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
use datafusion_expr::{
Accumulator, AccumulatorFactoryFunction, AggregateUDF as DfAggregateUdf, AggregateUDFImpl,
};
@@ -129,13 +129,13 @@ impl AggregateUDFImpl for DfUdafAdapter {
(self.accumulator)(acc_args)
}
- fn state_fields(&self, name: &str, _: ArrowDataType, _: Vec<Field>) -> Result<Vec<Field>> {
+ fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
let state_types = self.creator.state_types()?;
let fields = state_types
.into_iter()
.enumerate()
.map(|(i, t)| {
- let name = format!("{name}_{i}");
+ let name = format!("{}_{i}", args.name);
Field::new(name, t.as_arrow_type(), true)
})
.collect::<Vec<_>>();
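
The hunk above follows DataFusion 38's change of `AggregateUDFImpl::state_fields` to take a single `StateFieldsArgs` bundle instead of positional arguments; only `args.name` is needed here because the `{name}_{i}` naming of intermediate state columns stays the same. A minimal standalone sketch of that naming scheme (hypothetical helper, not code from this repository):

fn state_field_names(udaf_name: &str, state_count: usize) -> Vec<String> {
    // One state column per accumulator state slot, named "<udaf>_<index>".
    (0..state_count).map(|i| format!("{udaf_name}_{i}")).collect()
}

fn main() {
    assert_eq!(
        state_field_names("my_avg", 2),
        vec!["my_avg_0".to_string(), "my_avg_1".to_string()]
    );
}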
diff --git a/src/common/query/src/logical_plan/udf.rs b/src/common/query/src/logical_plan/udf.rs
index 79714479c3be..276f753e7791 100644
--- a/src/common/query/src/logical_plan/udf.rs
+++ b/src/common/query/src/logical_plan/udf.rs
@@ -108,6 +108,10 @@ impl ScalarUDFImpl for DfUdfAdapter {
fn invoke(&self, args: &[DfColumnarValue]) -> datafusion_common::Result<DfColumnarValue> {
(self.fun)(args)
}
+
+ fn invoke_no_args(&self, number_rows: usize) -> datafusion_common::Result<DfColumnarValue> {
+ Ok((self.fun)(&[])?.into_array(number_rows)?.into())
+ }
}
impl From<ScalarUdf> for DfScalarUDF {
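
The new `invoke_no_args` hook above covers scalar UDFs called with zero arguments: the wrapped function is evaluated once with an empty slice and the resulting columnar value is materialized into an array of `number_rows` rows. A rough standalone analogue of that broadcast step, using a stand-in enum rather than DataFusion's `ColumnarValue`:

#[allow(dead_code)]
enum Value {
    Scalar(i64),
    Array(Vec<i64>),
}

fn into_array(v: Value, number_rows: usize) -> Vec<i64> {
    match v {
        // A scalar result is repeated once per input row.
        Value::Scalar(x) => vec![x; number_rows],
        // An array result is already row-aligned.
        Value::Array(a) => a,
    }
}

fn main() {
    let out = Value::Scalar(42);
    assert_eq!(into_array(out, 3), vec![42, 42, 42]);
}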
diff --git a/src/common/query/src/signature.rs b/src/common/query/src/signature.rs
index a234990bf69c..13eaf49e2bda 100644
--- a/src/common/query/src/signature.rs
+++ b/src/common/query/src/signature.rs
@@ -27,10 +27,6 @@ pub enum TypeSignature {
/// arbitrary number of arguments of an common type out of a list of valid types
// A function such as `concat` is `Variadic(vec![ConcreteDataType::String, ConcreteDataType::String])`
Variadic(Vec<ConcreteDataType>),
- /// arbitrary number of arguments of an arbitrary but equal type
- // A function such as `array` is `VariadicEqual`
- // The first argument decides the type used for coercion
- VariadicEqual,
/// One or more arguments with arbitrary types
VariadicAny,
/// fixed number of arguments of an arbitrary but equal type out of a list of valid types
@@ -67,6 +63,7 @@ impl Signature {
volatility,
}
}
+
/// variadic - Creates a variadic signature that represents an arbitrary number of arguments all from a type in common_types.
pub fn variadic(common_types: Vec<ConcreteDataType>, volatility: Volatility) -> Self {
Self {
@@ -74,13 +71,6 @@ impl Signature {
volatility,
}
}
- /// variadic_equal - Creates a variadic signature that represents an arbitrary number of arguments of the same type.
- pub fn variadic_equal(volatility: Volatility) -> Self {
- Self {
- type_signature: TypeSignature::VariadicEqual,
- volatility,
- }
- }
/// variadic_any - Creates a variadic signature that represents an arbitrary number of arguments of any type.
pub fn variadic_any(volatility: Volatility) -> Self {
@@ -131,7 +121,6 @@ impl From<TypeSignature> for DfTypeSignature {
TypeSignature::Variadic(types) => {
DfTypeSignature::Variadic(concrete_types_to_arrow_types(types))
}
- TypeSignature::VariadicEqual => DfTypeSignature::VariadicEqual,
TypeSignature::Uniform(n, types) => {
DfTypeSignature::Uniform(n, concrete_types_to_arrow_types(types))
}
diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs
index 35d32e20843e..0af2c8ac33dd 100644
--- a/src/common/recordbatch/src/adapter.rs
+++ b/src/common/recordbatch/src/adapter.rs
@@ -292,7 +292,7 @@ impl ExecutionPlanVisitor for MetricCollector {
// skip if no metric available
let Some(metric) = plan.metrics() else {
self.record_batch_metrics.plan_metrics.push(PlanMetrics {
- plan: plan.name().to_string(),
+ plan: std::any::type_name::<Self>().to_string(),
level: self.current_level,
metrics: vec![],
});
diff --git a/src/frontend/src/instance/prom_store.rs b/src/frontend/src/instance/prom_store.rs
index 5055d5ac2215..97ddd238a500 100644
--- a/src/frontend/src/instance/prom_store.rs
+++ b/src/frontend/src/instance/prom_store.rs
@@ -218,7 +218,7 @@ impl PromStoreProtocolHandler for Instance {
let plan = output.meta.plan.clone();
query_results.push(to_query_result(&table_name, output).await?);
if let Some(ref plan) = plan {
- collect_plan_metrics(plan.clone(), &mut [&mut map]);
+ collect_plan_metrics(plan, &mut [&mut map]);
}
}
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index 8f9aab097f63..9f3d2b1c4077 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -322,11 +322,8 @@ fn find_primary_keys(
let constraints_pk = constraints
.iter()
.filter_map(|constraint| match constraint {
- TableConstraint::Unique {
- name: _,
- columns,
- is_primary: true,
- ..
+ TableConstraint::PrimaryKey {
+ name: _, columns, ..
} => Some(columns.iter().map(|ident| ident.value.clone())),
_ => None,
})
@@ -353,7 +350,6 @@ pub fn find_time_index(constraints: &[TableConstraint]) -> Result<String> {
TableConstraint::Unique {
name: Some(name),
columns,
- is_primary: false,
..
} => {
if name.value == TIME_INDEX {
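
A hedged sketch of the constraint split the two hunks above adapt to: with the newer sqlparser, primary keys arrive as a dedicated `PrimaryKey` variant, while the TIME INDEX is still encoded as a named `Unique` constraint. The enum below is a simplified stand-in, not the real sqlparser type; the `__time_index` name matches the constraint name asserted in the parser tests later in this diff.

// Simplified stand-in for sqlparser's TableConstraint.
enum ConstraintSketch {
    PrimaryKey { columns: Vec<String> },
    Unique { name: Option<String>, columns: Vec<String> },
}

fn classify(constraint: &ConstraintSketch) -> &'static str {
    match constraint {
        ConstraintSketch::PrimaryKey { .. } => "primary key",
        ConstraintSketch::Unique { name: Some(n), .. } if n.as_str() == "__time_index" => "time index",
        ConstraintSketch::Unique { .. } => "other unique constraint",
    }
}
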
diff --git a/src/promql/src/extension_plan/empty_metric.rs b/src/promql/src/extension_plan/empty_metric.rs
index f26337b2f64b..302510e63282 100644
--- a/src/promql/src/extension_plan/empty_metric.rs
+++ b/src/promql/src/extension_plan/empty_metric.rs
@@ -156,19 +156,19 @@ impl UserDefinedLogicalNodeCore for EmptyMetric {
)
}
- fn from_template(&self, expr: &[Expr], _inputs: &[LogicalPlan]) -> Self {
- Self {
+ fn with_exprs_and_inputs(
+ &self,
+ exprs: Vec<Expr>,
+ _inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ Ok(Self {
start: self.start,
end: self.end,
interval: self.interval,
- expr: if !expr.is_empty() {
- Some(expr[0].clone())
- } else {
- None
- },
+ expr: exprs.into_iter().next(),
time_index_schema: self.time_index_schema.clone(),
result_schema: self.result_schema.clone(),
- }
+ })
}
}
@@ -204,7 +204,7 @@ impl ExecutionPlan for EmptyMetricExec {
vec![]
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
vec![]
}
diff --git a/src/promql/src/extension_plan/histogram_fold.rs b/src/promql/src/extension_plan/histogram_fold.rs
index 13315a22a0f5..561fbe7720a8 100644
--- a/src/promql/src/extension_plan/histogram_fold.rs
+++ b/src/promql/src/extension_plan/histogram_fold.rs
@@ -95,17 +95,21 @@ impl UserDefinedLogicalNodeCore for HistogramFold {
)
}
- fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- Self {
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ Ok(Self {
le_column: self.le_column.clone(),
ts_column: self.ts_column.clone(),
- input: inputs[0].clone(),
+ input: inputs.into_iter().next().unwrap(),
field_column: self.field_column.clone(),
quantile: self.quantile,
            // This method cannot return an error. Otherwise we would need to re-calculate
            // the output schema.
output_schema: self.output_schema.clone(),
- }
+ })
}
}
@@ -279,8 +283,8 @@ impl ExecutionPlan for HistogramFoldExec {
vec![true; self.children().len()]
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
// cannot change schema with this method
diff --git a/src/promql/src/extension_plan/instant_manipulate.rs b/src/promql/src/extension_plan/instant_manipulate.rs
index 6591f6db06c6..1ab01f04aa80 100644
--- a/src/promql/src/extension_plan/instant_manipulate.rs
+++ b/src/promql/src/extension_plan/instant_manipulate.rs
@@ -83,18 +83,26 @@ impl UserDefinedLogicalNodeCore for InstantManipulate {
)
}
- fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert!(!inputs.is_empty());
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if inputs.is_empty() {
+ return Err(DataFusionError::Internal(
+ "InstantManipulate should have at least one input".to_string(),
+ ));
+ }
- Self {
+ Ok(Self {
start: self.start,
end: self.end,
lookback_delta: self.lookback_delta,
interval: self.interval,
time_index_column: self.time_index_column.clone(),
field_column: self.field_column.clone(),
- input: inputs[0].clone(),
- }
+ input: inputs.into_iter().next().unwrap(),
+ })
}
}
@@ -207,8 +215,8 @@ impl ExecutionPlan for InstantManipulateExec {
vec![false; self.children().len()]
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
fn with_new_children(
diff --git a/src/promql/src/extension_plan/normalize.rs b/src/promql/src/extension_plan/normalize.rs
index 70ca8da660cc..5abaad427c0c 100644
--- a/src/promql/src/extension_plan/normalize.rs
+++ b/src/promql/src/extension_plan/normalize.rs
@@ -81,15 +81,23 @@ impl UserDefinedLogicalNodeCore for SeriesNormalize {
)
}
- fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert!(!inputs.is_empty());
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if inputs.is_empty() {
+ return Err(DataFusionError::Internal(
+ "SeriesNormalize should have at least one input".to_string(),
+ ));
+ }
- Self {
+ Ok(Self {
offset: self.offset,
time_index_column_name: self.time_index_column_name.clone(),
need_filter_out_nan: self.need_filter_out_nan,
- input: inputs[0].clone(),
- }
+ input: inputs.into_iter().next().unwrap(),
+ })
}
}
@@ -173,8 +181,8 @@ impl ExecutionPlan for SeriesNormalizeExec {
self.input.properties()
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
fn with_new_children(
diff --git a/src/promql/src/extension_plan/range_manipulate.rs b/src/promql/src/extension_plan/range_manipulate.rs
index 6f644f308b3b..973fa5a38e30 100644
--- a/src/promql/src/extension_plan/range_manipulate.rs
+++ b/src/promql/src/extension_plan/range_manipulate.rs
@@ -231,19 +231,27 @@ impl UserDefinedLogicalNodeCore for RangeManipulate {
)
}
- fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert!(!inputs.is_empty());
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if inputs.is_empty() {
+ return Err(DataFusionError::Internal(
+ "RangeManipulate should have at least one input".to_string(),
+ ));
+ }
- Self {
+ Ok(Self {
start: self.start,
end: self.end,
interval: self.interval,
range: self.range,
time_index: self.time_index.clone(),
field_columns: self.field_columns.clone(),
- input: inputs[0].clone(),
+ input: inputs.into_iter().next().unwrap(),
output_schema: self.output_schema.clone(),
- }
+ })
}
}
@@ -280,8 +288,8 @@ impl ExecutionPlan for RangeManipulateExec {
vec![true; self.children().len()]
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
fn required_input_distribution(&self) -> Vec<Distribution> {
diff --git a/src/promql/src/extension_plan/scalar_calculate.rs b/src/promql/src/extension_plan/scalar_calculate.rs
index c2abd3f639ed..6383631c30a0 100644
--- a/src/promql/src/extension_plan/scalar_calculate.rs
+++ b/src/promql/src/extension_plan/scalar_calculate.rs
@@ -215,18 +215,26 @@ impl UserDefinedLogicalNodeCore for ScalarCalculate {
write!(f, "ScalarCalculate: tags={:?}", self.tag_columns)
}
- fn from_template(&self, _expr: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert!(!inputs.is_empty());
- ScalarCalculate {
+ fn with_exprs_and_inputs(
+ &self,
+ exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if !exprs.is_empty() {
+ return Err(DataFusionError::Internal(
+ "ScalarCalculate should not have any expressions".to_string(),
+ ));
+ }
+ Ok(ScalarCalculate {
start: self.start,
end: self.end,
interval: self.interval,
time_index: self.time_index.clone(),
tag_columns: self.tag_columns.clone(),
field_column: self.field_column.clone(),
- input: inputs[0].clone(),
+ input: inputs.into_iter().next().unwrap(),
output_schema: self.output_schema.clone(),
- }
+ })
}
}
@@ -264,8 +272,8 @@ impl ExecutionPlan for ScalarCalculateExec {
vec![Distribution::SinglePartition]
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
fn with_new_children(
diff --git a/src/promql/src/extension_plan/series_divide.rs b/src/promql/src/extension_plan/series_divide.rs
index d0524b5d5391..6dc531a8cbe5 100644
--- a/src/promql/src/extension_plan/series_divide.rs
+++ b/src/promql/src/extension_plan/series_divide.rs
@@ -67,13 +67,21 @@ impl UserDefinedLogicalNodeCore for SeriesDivide {
write!(f, "PromSeriesDivide: tags={:?}", self.tag_columns)
}
- fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert!(!inputs.is_empty());
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if inputs.is_empty() {
+ return Err(datafusion::error::DataFusionError::Internal(
+ "SeriesDivide must have at least one input".to_string(),
+ ));
+ }
- Self {
+ Ok(Self {
tag_columns: self.tag_columns.clone(),
input: inputs[0].clone(),
- }
+ })
}
}
@@ -160,8 +168,8 @@ impl ExecutionPlan for SeriesDivideExec {
vec![true; self.children().len()]
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
fn with_new_children(
diff --git a/src/promql/src/extension_plan/union_distinct_on.rs b/src/promql/src/extension_plan/union_distinct_on.rs
index 78a2cc913d85..2da0fd54fa88 100644
--- a/src/promql/src/extension_plan/union_distinct_on.rs
+++ b/src/promql/src/extension_plan/union_distinct_on.rs
@@ -135,18 +135,28 @@ impl UserDefinedLogicalNodeCore for UnionDistinctOn {
)
}
- fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert_eq!(inputs.len(), 2);
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if inputs.len() != 2 {
+ return Err(DataFusionError::Internal(
+ "UnionDistinctOn must have exactly 2 inputs".to_string(),
+ ));
+ }
- let left = inputs[0].clone();
- let right = inputs[1].clone();
- Self {
+ let mut inputs = inputs.into_iter();
+ let left = inputs.next().unwrap();
+ let right = inputs.next().unwrap();
+
+ Ok(Self {
left,
right,
compare_keys: self.compare_keys.clone(),
ts_col: self.ts_col.clone(),
output_schema: self.output_schema.clone(),
- }
+ })
}
}
@@ -181,8 +191,8 @@ impl ExecutionPlan for UnionDistinctOnExec {
self.properties.as_ref()
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.left.clone(), self.right.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.left, &self.right]
}
fn with_new_children(
diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml
index 00bfa6621a87..eed0d5a3398e 100644
--- a/src/query/Cargo.toml
+++ b/src/query/Cargo.toml
@@ -58,6 +58,7 @@ regex.workspace = true
session.workspace = true
snafu.workspace = true
sql.workspace = true
+sqlparser.workspace = true
store-api.workspace = true
substrait.workspace = true
table.workspace = true
diff --git a/src/query/src/analyze.rs b/src/query/src/analyze.rs
index a5c40b5bb3ed..b0158fd08709 100644
--- a/src/query/src/analyze.rs
+++ b/src/query/src/analyze.rs
@@ -97,8 +97,8 @@ impl ExecutionPlan for DistAnalyzeExec {
&self.properties
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
/// AnalyzeExec is handled specially so this value is ignored
@@ -210,7 +210,7 @@ fn create_output_batch(
builder.append_metric(0, 0, stage_0_metrics);
// Find merge scan and append its sub_stage_metrics
- input.apply(&mut |plan| {
+ input.apply(|plan| {
if let Some(merge_scan) = plan.as_any().downcast_ref::<MergeScanExec>() {
let sub_stage_metrics = merge_scan.sub_stage_metrics();
for (node, metric) in sub_stage_metrics.into_iter().enumerate() {
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index 0e98247398dd..65e78f130160 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -23,13 +23,12 @@ use common_query::logical_plan::create_aggregate_function;
use datafusion::catalog::TableReference;
use datafusion::error::Result as DfResult;
use datafusion::execution::context::SessionState;
-use datafusion::physical_plan::udf::ScalarUDF;
use datafusion::sql::planner::ContextProvider;
use datafusion::variable::VarType;
use datafusion_common::config::ConfigOptions;
use datafusion_common::DataFusionError;
use datafusion_expr::var_provider::is_system_variables;
-use datafusion_expr::{AggregateUDF, TableSource, WindowUDF};
+use datafusion_expr::{AggregateUDF, ScalarUDF, TableSource, WindowUDF};
use datafusion_sql::parser::Statement as DfStatement;
use session::context::QueryContextRef;
use snafu::ResultExt;
@@ -128,11 +127,14 @@ impl ContextProvider for DfContextProviderAdapter {
}
fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
- self.engine_state.aggregate_function(name).map(|func| {
- Arc::new(
- create_aggregate_function(func.name(), func.args_count(), func.create()).into(),
- )
- })
+ self.engine_state.aggregate_function(name).map_or_else(
+ || self.session_state.aggregate_functions().get(name).cloned(),
+ |func| {
+ Some(Arc::new(
+ create_aggregate_function(func.name(), func.args_count(), func.create()).into(),
+ ))
+ },
+ )
}
fn get_window_meta(&self, _name: &str) -> Option<Arc<WindowUDF>> {
@@ -161,17 +163,17 @@ impl ContextProvider for DfContextProviderAdapter {
self.session_state.config_options()
}
- fn udfs_names(&self) -> Vec<String> {
+ fn udf_names(&self) -> Vec<String> {
// TODO(LFC): Impl it.
vec![]
}
- fn udafs_names(&self) -> Vec<String> {
+ fn udaf_names(&self) -> Vec<String> {
// TODO(LFC): Impl it.
vec![]
}
- fn udwfs_names(&self) -> Vec<String> {
+ fn udwf_names(&self) -> Vec<String> {
// TODO(LFC): Impl it.
vec![]
}
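
A small sketch of the two-level lookup that `get_aggregate_meta` now performs: prefer an aggregate function registered in the query engine, and only then fall back to whatever the DataFusion session state has registered under the same name. Plain `HashMap`s stand in for the two registries.

use std::collections::HashMap;

fn resolve_aggregate<'a>(
    engine: &'a HashMap<String, String>,
    session: &'a HashMap<String, String>,
    name: &str,
) -> Option<&'a String> {
    // Engine-registered functions win; session-registered ones are the fallback.
    engine.get(name).or_else(|| session.get(name))
}
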
diff --git a/src/query/src/dist_plan/commutativity.rs b/src/query/src/dist_plan/commutativity.rs
index f59dc57268cb..618b98b34980 100644
--- a/src/query/src/dist_plan/commutativity.rs
+++ b/src/query/src/dist_plan/commutativity.rs
@@ -149,7 +149,6 @@ impl Categorizer {
| Expr::SimilarTo(_)
| Expr::IsUnknown(_)
| Expr::IsNotUnknown(_)
- | Expr::GetIndexedField(_)
| Expr::Case(_)
| Expr::Cast(_)
| Expr::TryCast(_)
diff --git a/src/query/src/dist_plan/merge_scan.rs b/src/query/src/dist_plan/merge_scan.rs
index 1af2516d233a..23d7fbb832cd 100644
--- a/src/query/src/dist_plan/merge_scan.rs
+++ b/src/query/src/dist_plan/merge_scan.rs
@@ -86,8 +86,12 @@ impl UserDefinedLogicalNodeCore for MergeScanLogicalPlan {
write!(f, "MergeScan [is_placeholder={}]", self.is_placeholder)
}
- fn from_template(&self, _exprs: &[datafusion_expr::Expr], _inputs: &[LogicalPlan]) -> Self {
- self.clone()
+ fn with_exprs_and_inputs(
+ &self,
+ _exprs: Vec<datafusion::prelude::Expr>,
+ _inputs: Vec<LogicalPlan>,
+ ) -> Result<Self> {
+ Ok(self.clone())
}
}
@@ -306,7 +310,7 @@ impl ExecutionPlan for MergeScanExec {
&self.properties
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
vec![]
}
diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs
index 4bb0cccc020c..c3d8b00eaf2d 100644
--- a/src/query/src/dist_plan/planner.rs
+++ b/src/query/src/dist_plan/planner.rs
@@ -171,7 +171,7 @@ struct TableNameExtractor {
pub table_name: Option<TableName>,
}
-impl TreeNodeVisitor for TableNameExtractor {
+impl TreeNodeVisitor<'_> for TableNameExtractor {
type Node = LogicalPlan;
fn f_down(&mut self, node: &Self::Node) -> Result<TreeNodeRecursion> {
diff --git a/src/query/src/optimizer/count_wildcard.rs b/src/query/src/optimizer/count_wildcard.rs
index b8a491003b98..ccea20cb91b0 100644
--- a/src/query/src/optimizer/count_wildcard.rs
+++ b/src/query/src/optimizer/count_wildcard.rs
@@ -57,7 +57,7 @@ impl CountWildcardToTimeIndexRule {
};
plan.map_expressions(|expr| {
let original_name = name_preserver.save(&expr)?;
- let transformed_expr = expr.transform_up_mut(&mut |expr| match expr {
+ let transformed_expr = expr.transform_up(|expr| match expr {
Expr::WindowFunction(mut window_function)
if Self::is_count_star_window_aggregate(&window_function) =>
{
@@ -135,7 +135,7 @@ struct TimeIndexFinder {
table_alias: Option<TableReference>,
}
-impl TreeNodeVisitor for TimeIndexFinder {
+impl TreeNodeVisitor<'_> for TimeIndexFinder {
type Node = LogicalPlan;
fn f_down(&mut self, node: &Self::Node) -> DataFusionResult<TreeNodeRecursion> {
diff --git a/src/query/src/optimizer/order_hint.rs b/src/query/src/optimizer/order_hint.rs
index e2bf9105bb35..55bf314b48d7 100644
--- a/src/query/src/optimizer/order_hint.rs
+++ b/src/query/src/optimizer/order_hint.rs
@@ -75,9 +75,9 @@ impl OrderHintRule {
{
let mut opts = Vec::with_capacity(order_expr.len());
for sort in order_expr {
- let name = match sort.expr.try_into_col() {
- Ok(col) => col.name,
- Err(_) => return Ok(Transformed::no(plan)),
+ let name = match sort.expr.try_as_col() {
+ Some(col) => col.name.clone(),
+ None => return Ok(Transformed::no(plan)),
};
opts.push(OrderOption {
name,
@@ -108,7 +108,7 @@ struct OrderHintVisitor {
order_expr: Option<Vec<Sort>>,
}
-impl TreeNodeVisitor for OrderHintVisitor {
+impl TreeNodeVisitor<'_> for OrderHintVisitor {
type Node = LogicalPlan;
fn f_down(&mut self, node: &Self::Node) -> DataFusionResult<TreeNodeRecursion> {
diff --git a/src/query/src/optimizer/remove_duplicate.rs b/src/query/src/optimizer/remove_duplicate.rs
index 8d3215ac4139..7f3ba2966887 100644
--- a/src/query/src/optimizer/remove_duplicate.rs
+++ b/src/query/src/optimizer/remove_duplicate.rs
@@ -50,7 +50,7 @@ impl PhysicalOptimizerRule for RemoveDuplicate {
impl RemoveDuplicate {
fn do_optimize(plan: Arc<dyn ExecutionPlan>) -> DfResult<Arc<dyn ExecutionPlan>> {
let result = plan
- .transform_down_mut(&mut |plan| {
+ .transform_down(|plan| {
if plan.as_any().is::<CoalesceBatchesExec>()
|| plan.as_any().is::<RepartitionExec>()
{
diff --git a/src/query/src/optimizer/string_normalization.rs b/src/query/src/optimizer/string_normalization.rs
index 56fb36c3df14..1a755f33a23f 100644
--- a/src/query/src/optimizer/string_normalization.rs
+++ b/src/query/src/optimizer/string_normalization.rs
@@ -26,7 +26,7 @@ pub struct StringNormalizationRule;
impl AnalyzerRule for StringNormalizationRule {
fn analyze(&self, plan: LogicalPlan, _config: &ConfigOptions) -> Result<LogicalPlan> {
- plan.transform(&|plan| {
+ plan.transform(|plan| {
let mut converter = StringNormalizationConverter;
let inputs = plan.inputs().into_iter().cloned().collect::<Vec<_>>();
let expr = plan
diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs
index 9643e63ef496..7155f22510ab 100644
--- a/src/query/src/planner.rs
+++ b/src/query/src/planner.rs
@@ -149,7 +149,7 @@ impl DfLogicalPlanner {
&query_ctx,
)?),
);
- PromPlanner::stmt_to_plan(table_provider, stmt)
+ PromPlanner::stmt_to_plan(table_provider, stmt, &self.session_state)
.await
.map(LogicalPlan::DfPlan)
.map_err(BoxedError::new)
diff --git a/src/query/src/promql/planner.rs b/src/query/src/promql/planner.rs
index 9d9fd66a31c8..2a0a16a2000c 100644
--- a/src/query/src/promql/planner.rs
+++ b/src/query/src/promql/planner.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::{BTreeSet, HashSet, VecDeque};
-use std::str::FromStr;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
@@ -22,13 +21,14 @@ use catalog::table_source::DfTableSourceProvider;
use common_query::prelude::GREPTIME_VALUE;
use datafusion::common::{DFSchemaRef, Result as DfResult};
use datafusion::datasource::DefaultTableSource;
+use datafusion::execution::context::SessionState;
use datafusion::logical_expr::expr::{
AggregateFunction, AggregateFunctionDefinition, Alias, ScalarFunction,
};
use datafusion::logical_expr::expr_rewriter::normalize_cols;
use datafusion::logical_expr::{
- AggregateFunction as AggregateFunctionEnum, BinaryExpr, BuiltinScalarFunction, Cast, Extension,
- LogicalPlan, LogicalPlanBuilder, Operator, ScalarFunctionDefinition, ScalarUDF as ScalarUdfDef,
+ AggregateFunction as AggregateFunctionEnum, BinaryExpr, Cast, Extension, LogicalPlan,
+ LogicalPlanBuilder, Operator, ScalarUDF as ScalarUdfDef,
};
use datafusion::prelude as df_prelude;
use datafusion::prelude::{Column, Expr as DfExpr, JoinType};
@@ -151,17 +151,22 @@ impl PromPlanner {
pub async fn stmt_to_plan(
table_provider: DfTableSourceProvider,
stmt: EvalStmt,
+ session_state: &SessionState,
) -> Result<LogicalPlan> {
let mut planner = Self {
table_provider,
ctx: PromPlannerContext::from_eval_stmt(&stmt),
};
- planner.prom_expr_to_plan(stmt.expr).await
+ planner.prom_expr_to_plan(stmt.expr, session_state).await
}
#[async_recursion]
- pub async fn prom_expr_to_plan(&mut self, prom_expr: PromExpr) -> Result<LogicalPlan> {
+ pub async fn prom_expr_to_plan(
+ &mut self,
+ prom_expr: PromExpr,
+ session_state: &SessionState,
+ ) -> Result<LogicalPlan> {
let res = match &prom_expr {
PromExpr::Aggregate(AggregateExpr {
op,
@@ -170,7 +175,7 @@ impl PromPlanner {
param: _param,
modifier,
}) => {
- let input = self.prom_expr_to_plan(*expr.clone()).await?;
+ let input = self.prom_expr_to_plan(*expr.clone(), session_state).await?;
// calculate columns to group by
// Need to append time index column into group by columns
@@ -194,7 +199,7 @@ impl PromPlanner {
}
PromExpr::Unary(UnaryExpr { expr }) => {
                // Unary Expr in PromQL implies the `-` operator
- let input = self.prom_expr_to_plan(*expr.clone()).await?;
+ let input = self.prom_expr_to_plan(*expr.clone(), session_state).await?;
self.projection_for_each_field_column(input, |col| {
Ok(DfExpr::Negative(Box::new(DfExpr::Column(col.into()))))
})?
@@ -250,7 +255,7 @@ impl PromPlanner {
}
// lhs is a literal, rhs is a column
(Some(mut expr), None) => {
- let input = self.prom_expr_to_plan(*rhs.clone()).await?;
+ let input = self.prom_expr_to_plan(*rhs.clone(), session_state).await?;
// check if the literal is a special time expr
if let Some(time_expr) = Self::try_build_special_time_expr(
lhs,
@@ -279,7 +284,7 @@ impl PromPlanner {
}
// lhs is a column, rhs is a literal
(None, Some(mut expr)) => {
- let input = self.prom_expr_to_plan(*lhs.clone()).await?;
+ let input = self.prom_expr_to_plan(*lhs.clone(), session_state).await?;
// check if the literal is a special time expr
if let Some(time_expr) = Self::try_build_special_time_expr(
rhs,
@@ -308,14 +313,16 @@ impl PromPlanner {
}
// both are columns. join them on time index
(None, None) => {
- let left_input = self.prom_expr_to_plan(*lhs.clone()).await?;
+ let left_input =
+ self.prom_expr_to_plan(*lhs.clone(), session_state).await?;
let left_field_columns = self.ctx.field_columns.clone();
let mut left_table_ref = self
.table_ref()
.unwrap_or_else(|_| TableReference::bare(""));
let left_context = self.ctx.clone();
- let right_input = self.prom_expr_to_plan(*rhs.clone()).await?;
+ let right_input =
+ self.prom_expr_to_plan(*rhs.clone(), session_state).await?;
let right_field_columns = self.ctx.field_columns.clone();
let mut right_table_ref = self
.table_ref()
@@ -399,7 +406,9 @@ impl PromPlanner {
}
}
}
- PromExpr::Paren(ParenExpr { expr }) => self.prom_expr_to_plan(*expr.clone()).await?,
+ PromExpr::Paren(ParenExpr { expr }) => {
+ self.prom_expr_to_plan(*expr.clone(), session_state).await?
+ }
PromExpr::Subquery(SubqueryExpr { .. }) => UnsupportedExprSnafu {
name: "Prom Subquery",
}
@@ -510,16 +519,18 @@ impl PromPlanner {
PromExpr::Call(Call { func, args }) => {
                // some special functions that are not expressions but plans
match func.name {
- SPECIAL_HISTOGRAM_QUANTILE => return self.create_histogram_plan(args).await,
+ SPECIAL_HISTOGRAM_QUANTILE => {
+ return self.create_histogram_plan(args, session_state).await
+ }
SPECIAL_VECTOR_FUNCTION => return self.create_vector_plan(args).await,
- SCALAR_FUNCTION => return self.create_scalar_plan(args).await,
+ SCALAR_FUNCTION => return self.create_scalar_plan(args, session_state).await,
_ => {}
}
// transform function arguments
let args = self.create_function_args(&args.args)?;
let input = if let Some(prom_expr) = args.input {
- self.prom_expr_to_plan(prom_expr).await?
+ self.prom_expr_to_plan(prom_expr, session_state).await?
} else {
self.ctx.time_index_column = Some(SPECIAL_TIME_FUNCTION.to_string());
self.ctx.reset_table_name_and_schema();
@@ -537,7 +548,8 @@ impl PromPlanner {
),
})
};
- let mut func_exprs = self.create_function_expr(func, args.literals)?;
+ let mut func_exprs =
+ self.create_function_expr(func, args.literals, session_state)?;
func_exprs.insert(0, self.create_time_index_column_expr()?);
func_exprs.extend_from_slice(&self.create_tag_column_exprs()?);
@@ -551,7 +563,9 @@ impl PromPlanner {
}
PromExpr::Extension(promql_parser::parser::ast::Extension { expr }) => {
let children = expr.children();
- let plan = self.prom_expr_to_plan(children[0].clone()).await?;
+ let plan = self
+ .prom_expr_to_plan(children[0].clone(), session_state)
+ .await?;
// Wrapper for the explanation/analyze of the existing plan
// https://docs.rs/datafusion-expr/latest/datafusion_expr/logical_plan/builder/struct.LogicalPlanBuilder.html#method.explain
// if `analyze` is true, runs the actual plan and produces
@@ -1063,6 +1077,7 @@ impl PromPlanner {
&mut self,
func: &Function,
other_input_exprs: Vec<DfExpr>,
+ session_state: &SessionState,
) -> Result<Vec<DfExpr>> {
// TODO(ruihang): check function args list
let mut other_input_exprs: VecDeque<DfExpr> = other_input_exprs.into();
@@ -1071,30 +1086,30 @@ impl PromPlanner {
let field_column_pos = 0;
let mut exprs = Vec::with_capacity(self.ctx.field_columns.len());
let scalar_func = match func.name {
- "increase" => ScalarFunc::ExtrapolateUdf(Increase::scalar_udf(
+ "increase" => ScalarFunc::ExtrapolateUdf(Arc::new(Increase::scalar_udf(
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
- )),
- "rate" => ScalarFunc::ExtrapolateUdf(Rate::scalar_udf(
+ ))),
+ "rate" => ScalarFunc::ExtrapolateUdf(Arc::new(Rate::scalar_udf(
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
- )),
- "delta" => ScalarFunc::ExtrapolateUdf(Delta::scalar_udf(
+ ))),
+ "delta" => ScalarFunc::ExtrapolateUdf(Arc::new(Delta::scalar_udf(
self.ctx.range.context(ExpectRangeSelectorSnafu)?,
- )),
- "idelta" => ScalarFunc::Udf(IDelta::<false>::scalar_udf()),
- "irate" => ScalarFunc::Udf(IDelta::<true>::scalar_udf()),
- "resets" => ScalarFunc::Udf(Resets::scalar_udf()),
- "changes" => ScalarFunc::Udf(Changes::scalar_udf()),
- "deriv" => ScalarFunc::Udf(Deriv::scalar_udf()),
- "avg_over_time" => ScalarFunc::Udf(AvgOverTime::scalar_udf()),
- "min_over_time" => ScalarFunc::Udf(MinOverTime::scalar_udf()),
- "max_over_time" => ScalarFunc::Udf(MaxOverTime::scalar_udf()),
- "sum_over_time" => ScalarFunc::Udf(SumOverTime::scalar_udf()),
- "count_over_time" => ScalarFunc::Udf(CountOverTime::scalar_udf()),
- "last_over_time" => ScalarFunc::Udf(LastOverTime::scalar_udf()),
- "absent_over_time" => ScalarFunc::Udf(AbsentOverTime::scalar_udf()),
- "present_over_time" => ScalarFunc::Udf(PresentOverTime::scalar_udf()),
- "stddev_over_time" => ScalarFunc::Udf(StddevOverTime::scalar_udf()),
- "stdvar_over_time" => ScalarFunc::Udf(StdvarOverTime::scalar_udf()),
+ ))),
+ "idelta" => ScalarFunc::Udf(Arc::new(IDelta::<false>::scalar_udf())),
+ "irate" => ScalarFunc::Udf(Arc::new(IDelta::<true>::scalar_udf())),
+ "resets" => ScalarFunc::Udf(Arc::new(Resets::scalar_udf())),
+ "changes" => ScalarFunc::Udf(Arc::new(Changes::scalar_udf())),
+ "deriv" => ScalarFunc::Udf(Arc::new(Deriv::scalar_udf())),
+ "avg_over_time" => ScalarFunc::Udf(Arc::new(AvgOverTime::scalar_udf())),
+ "min_over_time" => ScalarFunc::Udf(Arc::new(MinOverTime::scalar_udf())),
+ "max_over_time" => ScalarFunc::Udf(Arc::new(MaxOverTime::scalar_udf())),
+ "sum_over_time" => ScalarFunc::Udf(Arc::new(SumOverTime::scalar_udf())),
+ "count_over_time" => ScalarFunc::Udf(Arc::new(CountOverTime::scalar_udf())),
+ "last_over_time" => ScalarFunc::Udf(Arc::new(LastOverTime::scalar_udf())),
+ "absent_over_time" => ScalarFunc::Udf(Arc::new(AbsentOverTime::scalar_udf())),
+ "present_over_time" => ScalarFunc::Udf(Arc::new(PresentOverTime::scalar_udf())),
+ "stddev_over_time" => ScalarFunc::Udf(Arc::new(StddevOverTime::scalar_udf())),
+ "stdvar_over_time" => ScalarFunc::Udf(Arc::new(StdvarOverTime::scalar_udf())),
"quantile_over_time" => {
let quantile_expr = match other_input_exprs.pop_front() {
Some(DfExpr::Literal(ScalarValue::Float64(Some(quantile)))) => quantile,
@@ -1103,7 +1118,7 @@ impl PromPlanner {
}
.fail()?,
};
- ScalarFunc::Udf(QuantileOverTime::scalar_udf(quantile_expr))
+ ScalarFunc::Udf(Arc::new(QuantileOverTime::scalar_udf(quantile_expr)))
}
"predict_linear" => {
let t_expr = match other_input_exprs.pop_front() {
@@ -1114,7 +1129,7 @@ impl PromPlanner {
}
.fail()?,
};
- ScalarFunc::Udf(PredictLinear::scalar_udf(t_expr))
+ ScalarFunc::Udf(Arc::new(PredictLinear::scalar_udf(t_expr)))
}
"holt_winters" => {
let sf_exp = match other_input_exprs.pop_front() {
@@ -1134,7 +1149,7 @@ impl PromPlanner {
}
.fail()?,
};
- ScalarFunc::Udf(HoltWinters::scalar_udf(sf_exp, tf_exp))
+ ScalarFunc::Udf(Arc::new(HoltWinters::scalar_udf(sf_exp, tf_exp)))
}
"time" => {
exprs.push(build_special_time_expr(
@@ -1201,9 +1216,7 @@ impl PromPlanner {
right: Box::new(interval_1day_lit_expr),
});
let date_trunc_expr = DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(
- datafusion_functions::datetime::date_trunc(),
- ),
+ func: datafusion_functions::datetime::date_trunc(),
args: vec![month_lit_expr, self.create_time_index_column_expr()?],
});
let date_trunc_plus_interval_expr = DfExpr::BinaryExpr(BinaryExpr {
@@ -1212,9 +1225,7 @@ impl PromPlanner {
right: Box::new(the_1month_minus_1day_expr),
});
let date_part_expr = DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(
- datafusion_functions::datetime::date_part(),
- ),
+ func: datafusion_functions::datetime::date_part(),
args: vec![day_lit_expr, date_trunc_plus_interval_expr],
});
@@ -1222,8 +1233,8 @@ impl PromPlanner {
ScalarFunc::GeneratedExpr
}
_ => {
- if let Ok(f) = BuiltinScalarFunction::from_str(func.name) {
- ScalarFunc::DataFusionBuiltin(f)
+ if let Some(f) = session_state.scalar_functions().get(func.name) {
+ ScalarFunc::DataFusionBuiltin(f.clone())
} else if let Some(f) = datafusion_functions::math::functions()
.iter()
.find(|f| f.name() == func.name)
@@ -1242,28 +1253,25 @@ impl PromPlanner {
let col_expr = DfExpr::Column(Column::from_name(value));
match scalar_func.clone() {
- ScalarFunc::DataFusionBuiltin(fun) => {
+ ScalarFunc::DataFusionBuiltin(func) => {
other_input_exprs.insert(field_column_pos, col_expr);
let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::BuiltIn(fun),
+ func,
args: other_input_exprs.clone().into(),
});
exprs.push(fn_expr);
let _ = other_input_exprs.remove(field_column_pos);
}
- ScalarFunc::DataFusionUdf(f) => {
+ ScalarFunc::DataFusionUdf(func) => {
let args = itertools::chain!(
other_input_exprs.iter().take(field_column_pos).cloned(),
std::iter::once(col_expr),
other_input_exprs.iter().skip(field_column_pos).cloned()
)
.collect_vec();
- exprs.push(DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(f),
- args,
- }))
+ exprs.push(DfExpr::ScalarFunction(ScalarFunction { func, args }))
}
- ScalarFunc::Udf(fun) => {
+ ScalarFunc::Udf(func) => {
let ts_range_expr = DfExpr::Column(Column::from_name(
RangeManipulate::build_timestamp_range_name(
self.ctx.time_index_column.as_ref().unwrap(),
@@ -1272,14 +1280,14 @@ impl PromPlanner {
other_input_exprs.insert(field_column_pos, ts_range_expr);
other_input_exprs.insert(field_column_pos + 1, col_expr);
let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(Arc::new(fun)),
+ func,
args: other_input_exprs.clone().into(),
});
exprs.push(fn_expr);
let _ = other_input_exprs.remove(field_column_pos + 1);
let _ = other_input_exprs.remove(field_column_pos);
}
- ScalarFunc::ExtrapolateUdf(fun) => {
+ ScalarFunc::ExtrapolateUdf(func) => {
let ts_range_expr = DfExpr::Column(Column::from_name(
RangeManipulate::build_timestamp_range_name(
self.ctx.time_index_column.as_ref().unwrap(),
@@ -1290,7 +1298,7 @@ impl PromPlanner {
other_input_exprs
.insert(field_column_pos + 2, self.create_time_index_column_expr()?);
let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(Arc::new(fun)),
+ func,
args: other_input_exprs.clone().into(),
});
exprs.push(fn_expr);
@@ -1418,7 +1426,11 @@ impl PromPlanner {
}
/// Create a [SPECIAL_HISTOGRAM_QUANTILE] plan.
- async fn create_histogram_plan(&mut self, args: &PromFunctionArgs) -> Result<LogicalPlan> {
+ async fn create_histogram_plan(
+ &mut self,
+ args: &PromFunctionArgs,
+ session_state: &SessionState,
+ ) -> Result<LogicalPlan> {
if args.args.len() != 2 {
return FunctionInvalidArgumentSnafu {
fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(),
@@ -1431,7 +1443,7 @@ impl PromPlanner {
}
})?;
let input = args.args[1].as_ref().clone();
- let input_plan = self.prom_expr_to_plan(input).await?;
+ let input_plan = self.prom_expr_to_plan(input, session_state).await?;
if !self.ctx.has_le_tag() {
return ColumnNotFoundSnafu {
@@ -1505,7 +1517,11 @@ impl PromPlanner {
}
/// Create a [SCALAR_FUNCTION] plan
- async fn create_scalar_plan(&mut self, args: &PromFunctionArgs) -> Result<LogicalPlan> {
+ async fn create_scalar_plan(
+ &mut self,
+ args: &PromFunctionArgs,
+ session_state: &SessionState,
+ ) -> Result<LogicalPlan> {
ensure!(
args.len() == 1,
FunctionInvalidArgumentSnafu {
@@ -1513,7 +1529,7 @@ impl PromPlanner {
}
);
let input = self
- .prom_expr_to_plan(args.args[0].as_ref().clone())
+ .prom_expr_to_plan(args.args[0].as_ref().clone(), session_state)
.await?;
ensure!(
self.ctx.field_columns.len() == 1,
@@ -1653,16 +1669,13 @@ impl PromPlanner {
token::T_LTE => Ok(Box::new(|lhs, rhs| Ok(lhs.lt_eq(rhs)))),
token::T_POW => Ok(Box::new(|lhs, rhs| {
Ok(DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(datafusion_functions::math::power()),
+ func: datafusion_functions::math::power(),
args: vec![lhs, rhs],
}))
})),
token::T_ATAN2 => Ok(Box::new(|lhs, rhs| {
Ok(DfExpr::ScalarFunction(ScalarFunction {
- // func_def: ScalarFunctionDefinition::BuiltIn(BuiltinScalarFunction::Atan2),
- func_def: datafusion_expr::ScalarFunctionDefinition::UDF(
- datafusion_functions::math::atan2(),
- ),
+ func: datafusion_functions::math::atan2(),
args: vec![lhs, rhs],
}))
})),
@@ -2153,7 +2166,7 @@ impl PromPlanner {
})?,
);
let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
- func_def: ScalarFunctionDefinition::UDF(datafusion_functions::datetime::date_part()),
+ func: datafusion_functions::datetime::date_part(),
args: vec![lit_expr, input_expr],
});
Ok(fn_expr)
@@ -2168,13 +2181,13 @@ struct FunctionArgs {
#[derive(Debug, Clone)]
enum ScalarFunc {
- DataFusionBuiltin(BuiltinScalarFunction),
+ DataFusionBuiltin(Arc<ScalarUdfDef>),
/// The UDF that is defined by Datafusion itself.
DataFusionUdf(Arc<ScalarUdfDef>),
- Udf(ScalarUdfDef),
+ Udf(Arc<ScalarUdfDef>),
// todo(ruihang): maybe merge with Udf later
    /// UDF that requires extra information like range length to be evaluated.
- ExtrapolateUdf(ScalarUdfDef),
+ ExtrapolateUdf(Arc<ScalarUdfDef>),
/// Func that doesn't require input, like `time()`.
GeneratedExpr,
}
@@ -2187,8 +2200,10 @@ mod test {
use catalog::RegisterTableRequest;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_query::test_util::DummyDecoder;
+ use datafusion::execution::runtime_env::RuntimeEnv;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
+ use df_prelude::SessionConfig;
use promql_parser::label::Labels;
use promql_parser::parser;
use session::context::QueryContext;
@@ -2197,6 +2212,10 @@ mod test {
use super::*;
+ fn build_session_state() -> SessionState {
+ SessionState::new_with_config_rt(SessionConfig::new(), Arc::new(RuntimeEnv::default()))
+ }
+
async fn build_test_table_provider(
table_name_tuples: &[(String, String)],
num_tag: usize,
@@ -2295,7 +2314,7 @@ mod test {
1,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt)
+ let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt, &build_session_state())
.await
.unwrap();
@@ -2505,9 +2524,10 @@ mod test {
2,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt.clone())
- .await
- .unwrap();
+ let plan =
+ PromPlanner::stmt_to_plan(table_provider, eval_stmt.clone(), &build_session_state())
+ .await
+ .unwrap();
let expected_no_without = String::from(
"Sort: some_metric.tag_1 ASC NULLS LAST, some_metric.timestamp ASC NULLS LAST [tag_1:Utf8, timestamp:Timestamp(Millisecond, None), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
\n Aggregate: groupBy=[[some_metric.tag_1, some_metric.timestamp]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_1:Utf8, timestamp:Timestamp(Millisecond, None), TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
@@ -2535,7 +2555,7 @@ mod test {
2,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt)
+ let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt, &build_session_state())
.await
.unwrap();
let expected_without = String::from(
@@ -2660,7 +2680,7 @@ mod test {
1,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt)
+ let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt, &build_session_state())
.await
.unwrap();
@@ -2710,7 +2730,7 @@ mod test {
1,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt)
+ let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt, &build_session_state())
.await
.unwrap();
@@ -2954,9 +2974,13 @@ mod test {
3,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt.clone())
- .await
- .unwrap();
+ let plan = PromPlanner::stmt_to_plan(
+ table_provider,
+ eval_stmt.clone(),
+ &build_session_state(),
+ )
+ .await
+ .unwrap();
let mut fields = plan.schema().field_names();
let mut expected = case.1.into_iter().map(String::from).collect::<Vec<_>>();
fields.sort();
@@ -2978,7 +3002,12 @@ mod test {
3,
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt.clone()).await;
+ let plan = PromPlanner::stmt_to_plan(
+ table_provider,
+ eval_stmt.clone(),
+ &build_session_state(),
+ )
+ .await;
assert!(plan.is_err(), "case: {:?}", case);
}
}
@@ -3030,7 +3059,8 @@ mod test {
)
.await;
- let plan = PromPlanner::stmt_to_plan(table_provider, eval_stmt).await;
+ let plan =
+ PromPlanner::stmt_to_plan(table_provider, eval_stmt, &build_session_state()).await;
assert!(plan.is_err(), "query: {:?}", query);
}
}
@@ -3096,6 +3126,7 @@ mod test {
interval: Duration::from_secs(5),
lookback_delta: Duration::from_secs(1),
},
+ &build_session_state(),
)
.await
.unwrap();
@@ -3124,6 +3155,7 @@ mod test {
interval: Duration::from_secs(5),
lookback_delta: Duration::from_secs(1),
},
+ &build_session_state(),
)
.await
.unwrap();
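
A hedged sketch of the function-name resolution the PromQL planner follows now that `BuiltinScalarFunction` is gone and a `SessionState` is threaded through `prom_expr_to_plan`: a name is looked up in the session's scalar-function registry first, then among the math functions shipped with `datafusion_functions`. `HashMap`s stand in for both registries, and returning an error for unresolved names is this sketch's own simplification.

use std::collections::HashMap;

fn resolve_scalar(
    session_udfs: &HashMap<String, u32>,
    df_math_udfs: &HashMap<String, u32>,
    name: &str,
) -> Result<u32, String> {
    session_udfs
        .get(name)
        .or_else(|| df_math_udfs.get(name))
        .copied()
        .ok_or_else(|| format!("unsupported function: {name}"))
}
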
diff --git a/src/query/src/range_select/plan.rs b/src/query/src/range_select/plan.rs
index 5ba9b2248830..7bed19406d6e 100644
--- a/src/query/src/range_select/plan.rs
+++ b/src/query/src/range_select/plan.rs
@@ -496,8 +496,9 @@ impl RangeSelect {
DFSchema::new_with_metadata(by_fields, input.schema().metadata().clone())
.context(DataFusionSnafu)?,
);
- // If the results of project plan can be obtained directly from range plan without any additional calculations, no project plan is required.
- // We can simply project the final output of the range plan to produce the final result.
+        // If the results of the project plan can be obtained directly from the range plan without any
+        // additional calculations, no project plan is required. We can simply project the final output
+        // of the range plan to produce the final result.
let schema_project = projection_expr
.iter()
.map(|project_expr| {
@@ -506,7 +507,12 @@ impl RangeSelect {
.index_of_column_by_name(column.relation.as_ref(), &column.name)
.ok_or(())
} else {
- Err(())
+ let (qualifier, field) = project_expr
+ .to_field(input.schema().as_ref())
+ .map_err(|_| ())?;
+ schema_before_project
+ .index_of_column_by_name(qualifier.as_ref(), field.name())
+ .ok_or(())
}
})
.collect::<std::result::Result<Vec<usize>, ()>>()
@@ -584,9 +590,22 @@ impl UserDefinedLogicalNodeCore for RangeSelect {
)
}
- fn from_template(&self, exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
- assert!(!inputs.is_empty());
- assert!(exprs.len() == self.range_expr.len() + self.by.len() + 1);
+ fn with_exprs_and_inputs(
+ &self,
+ exprs: Vec<Expr>,
+ inputs: Vec<LogicalPlan>,
+ ) -> DataFusionResult<Self> {
+ if inputs.is_empty() {
+ return Err(DataFusionError::Plan(
+ "RangeSelect: inputs is empty".to_string(),
+ ));
+ }
+ if exprs.len() != self.range_expr.len() + self.by.len() + 1 {
+ return Err(DataFusionError::Plan(
+ "RangeSelect: exprs length not match".to_string(),
+ ));
+ }
+
let range_expr = exprs
.iter()
.zip(self.range_expr.iter())
@@ -601,7 +620,7 @@ impl UserDefinedLogicalNodeCore for RangeSelect {
.collect();
let time_expr = exprs[self.range_expr.len()].clone();
let by = exprs[self.range_expr.len() + 1..].to_vec();
- Self {
+ Ok(Self {
align: self.align,
align_to: self.align_to,
range_expr,
@@ -613,7 +632,7 @@ impl UserDefinedLogicalNodeCore for RangeSelect {
by_schema: self.by_schema.clone(),
schema_project: self.schema_project.clone(),
schema_before_project: self.schema_before_project.clone(),
- }
+ })
}
}
@@ -674,24 +693,11 @@ impl RangeSelect {
};
let expr = match &range_expr {
- Expr::AggregateFunction(
- aggr @ datafusion_expr::expr::AggregateFunction {
- func_def:
- AggregateFunctionDefinition::BuiltIn(AggregateFunction::FirstValue),
- ..
- },
- )
- | Expr::AggregateFunction(
- aggr @ datafusion_expr::expr::AggregateFunction {
- func_def:
- AggregateFunctionDefinition::BuiltIn(AggregateFunction::LastValue),
- ..
- },
- ) => {
- let is_last_value_func = matches!(
- aggr.func_def,
- AggregateFunctionDefinition::BuiltIn(AggregateFunction::LastValue)
- );
+ Expr::AggregateFunction(aggr)
+ if (aggr.func_def.name() == "last_value"
+ || aggr.func_def.name() == "first_value") =>
+ {
+ let is_last_value_func = aggr.func_def.name() == "last_value";
// Because we only need to find the first_value/last_value,
// the complexity of sorting the entire batch is O(nlogn).
@@ -795,10 +801,8 @@ impl RangeSelect {
&input_schema,
name,
false,
+ false,
),
- f => Err(DataFusionError::NotImplemented(format!(
- "Range function from {f:?}"
- ))),
}
}
_ => Err(DataFusionError::Plan(format!(
@@ -930,8 +934,8 @@ impl ExecutionPlan for RangeSelectExec {
&self.cache
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
- vec![self.input.clone()]
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
}
fn with_new_children(
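
A rough sketch of how the rebuilt `RangeSelect` splits the flat expression list back into its parts, `[range exprs..., time expr, by exprs...]`, validating the length up front instead of asserting (the real code checks the exact expected count of `range + by + 1`). `String` stands in for `Expr`, and the error type for `DataFusionError::Plan`.

fn split_exprs(
    exprs: Vec<String>,
    n_range: usize,
) -> Result<(Vec<String>, String, Vec<String>), String> {
    if exprs.len() < n_range + 1 {
        return Err("RangeSelect: exprs length not match".to_string());
    }
    let mut iter = exprs.into_iter();
    let range_exprs: Vec<_> = iter.by_ref().take(n_range).collect();
    let time_expr = iter.next().expect("length checked above");
    let by_exprs: Vec<_> = iter.collect();
    Ok((range_exprs, time_expr, by_exprs))
}
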
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index 087fa2c9010a..785e9ae2b43f 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -510,7 +510,7 @@ impl RangePlanRewriter {
fn have_range_in_exprs(exprs: &[Expr]) -> bool {
exprs.iter().any(|expr| {
let mut find_range = false;
- let _ = expr.apply(&mut |expr| {
+ let _ = expr.apply(|expr| {
Ok(match expr {
Expr::ScalarFunction(func) if func.name() == "range_fn" => {
find_range = true;
@@ -525,7 +525,7 @@ fn have_range_in_exprs(exprs: &[Expr]) -> bool {
fn interval_only_in_expr(expr: &Expr) -> bool {
let mut all_interval = true;
- let _ = expr.apply(&mut |expr| {
+ let _ = expr.apply(|expr| {
if !matches!(
expr,
Expr::Literal(ScalarValue::IntervalDayTime(_))
@@ -651,8 +651,8 @@ mod test {
let query =
r#"SELECT (covar(field_0 + field_1, field_1)/4) RANGE '5m' FROM test ALIGN '1h';"#;
let expected = String::from(
- "Projection: COVAR(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4) [COVAR(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4):Float64;N]\
- \n RangeSelect: range_exprs=[COVAR(test.field_0 + test.field_1,test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1, test.tag_2, test.tag_3, test.tag_4], time_index=timestamp [COVAR(test.field_0 + test.field_1,test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
+ "Projection: covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4) [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4):Float64;N]\
+ \n RangeSelect: range_exprs=[covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1, test.tag_2, test.tag_3, test.tag_4], time_index=timestamp [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\
\n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]"
);
query_plan_compare(query, expected).await;
diff --git a/src/query/src/sql/show_create_table.rs b/src/query/src/sql/show_create_table.rs
index 8002ef0d8e1b..74150465f1bc 100644
--- a/src/query/src/sql/show_create_table.rs
+++ b/src/query/src/sql/show_create_table.rs
@@ -24,6 +24,7 @@ use sql::dialect::GreptimeDbDialect;
use sql::parser::ParserContext;
use sql::statements::create::{CreateTable, TIME_INDEX};
use sql::statements::{self, OptionMap};
+use sqlparser::ast::KeyOrIndexDisplay;
use store_api::metric_engine_consts::{is_metric_engine, is_metric_engine_internal_column};
use table::metadata::{TableInfoRef, TableMeta};
use table::requests::{FILE_TABLE_META_KEY, TTL_KEY, WRITE_BUFFER_SIZE_KEY};
@@ -108,8 +109,11 @@ fn create_table_constraints(
constraints.push(TableConstraint::Unique {
name: Some(TIME_INDEX.into()),
columns: vec![Ident::with_quote(quote_style, column_name)],
- is_primary: false,
characteristics: None,
+ index_name: None,
+ index_type_display: KeyOrIndexDisplay::None,
+ index_type: None,
+ index_options: vec![],
});
}
if !table_meta.primary_key_indices.is_empty() {
@@ -124,11 +128,13 @@ fn create_table_constraints(
}
})
.collect();
- constraints.push(TableConstraint::Unique {
+ constraints.push(TableConstraint::PrimaryKey {
name: None,
columns,
- is_primary: true,
characteristics: None,
+ index_name: None,
+ index_type: None,
+ index_options: vec![],
});
}
diff --git a/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs b/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs
index f83aa2bf1868..3088faa4f486 100644
--- a/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs
+++ b/src/script/src/python/ffi_types/pair_tests/sample_testcases.rs
@@ -1054,15 +1054,6 @@ ret"#
.to_string(),
expect: vector!(Float64Vector, [0.0, consts::LOG10_2, 0.47712125471966244,]),
},
- CodeBlockTestCase {
- input: ronish! {},
- script: r#"
-from greptime import *
-ret = 0.0<=random(3)<=1.0
-ret"#
- .to_string(),
- expect: vector!(BooleanVector, &[true, true, true]),
- },
CodeBlockTestCase {
input: ronish! {
"values": vector!(Int64Vector, [1, 2, 2, 3])
@@ -1130,30 +1121,6 @@ ret"#
.to_string(),
expect: vector!(Int64Vector, [10]),
},
- CodeBlockTestCase {
- input: ronish! {
- "a": vector!(Float64Vector, [1.0, 2.0, 3.0]),
- "b": vector!(Float64Vector, [1.0, 0.0, -1.0])
- },
- script: r#"
-from greptime import *
-ret = vector([covariance(a, b)])
-ret"#
- .to_string(),
- expect: vector!(Float64Vector, [-1.0]),
- },
- CodeBlockTestCase {
- input: ronish! {
- "a": vector!(Float64Vector, [1.0, 2.0, 3.0]),
- "b": vector!(Float64Vector, [1.0, 0.0, -1.0])
- },
- script: r#"
-from greptime import *
-ret = vector([covariance_pop(a, b)])
-ret"#
- .to_string(),
- expect: vector!(Float64Vector, [-0.6666666666666666]),
- },
CodeBlockTestCase {
input: ronish! {
"a": vector!(Float64Vector, [1.0, 2.0, 3.0]),
diff --git a/src/script/src/python/pyo3/builtins.rs b/src/script/src/python/pyo3/builtins.rs
index bc8f23107aa0..129675b76189 100644
--- a/src/script/src/python/pyo3/builtins.rs
+++ b/src/script/src/python/pyo3/builtins.rs
@@ -100,14 +100,14 @@ pub(crate) fn greptime_builtins(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
log10,
random,
approx_distinct,
- median,
+ // median,
approx_percentile_cont,
array_agg,
avg,
correlation,
count,
- covariance,
- covariance_pop,
+ // covariance,
+ // covariance_pop,
max,
min,
stddev,
@@ -351,7 +351,7 @@ fn approx_distinct(py: Python<'_>, v0: &PyVector) -> PyResult<PyObject> {
*/
bind_aggr_expr!(approx_distinct, ApproxDistinct,[v0], v0, expr0=>0);
-bind_aggr_expr!(median, Median,[v0], v0, expr0=>0);
+// bind_aggr_expr!(median, Median,[v0], v0, expr0=>0);
#[pyfunction]
fn approx_percentile_cont(py: Python<'_>, values: &PyVector, percent: f64) -> PyResult<PyObject> {
@@ -391,9 +391,9 @@ bind_aggr_expr!(correlation, Correlation,[v0, v1], v0, expr0=>0, expr1=>1);
bind_aggr_expr!(count, Count,[v0], v0, expr0=>0);
-bind_aggr_expr!(covariance, Covariance,[v0, v1], v0, expr0=>0, expr1=>1);
+// bind_aggr_expr!(covariance, Covariance,[v0, v1], v0, expr0=>0, expr1=>1);
-bind_aggr_expr!(covariance_pop, CovariancePop,[v0, v1], v0, expr0=>0, expr1=>1);
+// bind_aggr_expr!(covariance_pop, CovariancePop,[v0, v1], v0, expr0=>0, expr1=>1);
bind_aggr_expr!(max, Max,[v0], v0, expr0=>0);
diff --git a/src/script/src/python/rspython/builtins.rs b/src/script/src/python/rspython/builtins.rs
index e08337f76bfe..db7d52b7ac16 100644
--- a/src/script/src/python/rspython/builtins.rs
+++ b/src/script/src/python/rspython/builtins.rs
@@ -619,17 +619,6 @@ pub(crate) mod greptime_builtin {
);
}
- #[pyfunction]
- fn median(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> {
- bind_aggr_fn!(
- Median,
- vm,
- &[values.to_arrow_array()],
- values.arrow_data_type(),
- expr0
- );
- }
-
    /// Not implemented in datafusion
/// TODO(discord9): use greptime's own impl instead
/*
@@ -722,38 +711,6 @@ pub(crate) mod greptime_builtin {
);
}
- #[pyfunction]
- fn covariance(
- arg0: PyVectorRef,
- arg1: PyVectorRef,
- vm: &VirtualMachine,
- ) -> PyResult<PyObjectRef> {
- bind_aggr_fn!(
- Covariance,
- vm,
- &[arg0.to_arrow_array(), arg1.to_arrow_array()],
- arg0.arrow_data_type(),
- expr0,
- expr1
- );
- }
-
- #[pyfunction]
- fn covariance_pop(
- arg0: PyVectorRef,
- arg1: PyVectorRef,
- vm: &VirtualMachine,
- ) -> PyResult<PyObjectRef> {
- bind_aggr_fn!(
- CovariancePop,
- vm,
- &[arg0.to_arrow_array(), arg1.to_arrow_array()],
- arg0.arrow_data_type(),
- expr0,
- expr1
- );
- }
-
#[pyfunction]
fn max(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> {
bind_aggr_fn!(
diff --git a/src/script/src/python/rspython/builtins/testcases.ron b/src/script/src/python/rspython/builtins/testcases.ron
index b5882048e8ec..0b053538b4fa 100644
--- a/src/script/src/python/rspython/builtins/testcases.ron
+++ b/src/script/src/python/rspython/builtins/testcases.ron
@@ -343,17 +343,6 @@ log10(values)"#,
ty: Float64
))
),
- TestCase(
- input: {},
- script: r#"
-from greptime import *
-random(42)"#,
- expect: Ok((
- value: LenFloatVec(42),
- ty: Float64
- ))
- ),
-
    // UDAF (Aggregate function)
    // approx function is non-deterministic
TestCase(
@@ -468,44 +457,6 @@ count(values)"#,
ty: Int64
))
),
- TestCase(
- input: {
- "a": Var(
- ty: Float64,
- value: FloatVec([1.0, 2.0, 3.0])
- ),
- "b": Var(
- ty: Float64,
- value: FloatVec([1.0, 0.0, -1.0])
- ),
- },
- script: r#"
-from greptime import *
-covariance(a, b)"#,
- expect: Ok((
- value: Float(-1.0),
- ty: Float64
- ))
- ),
- TestCase(
- input: {
- "a": Var(
- ty: Float64,
- value: FloatVec([1.0, 2.0, 3.0])
- ),
- "b": Var(
- ty: Float64,
- value: FloatVec([1.0, 0.0, -1.0])
- ),
- },
- script: r#"
-from greptime import *
-covariance_pop(a, b)"#,
- expect: Ok((
- value: Float(-0.6666666666666666),
- ty: Float64
- ))
- ),
TestCase(
input: {
"values": Var(
@@ -955,21 +906,6 @@ clip(values, lower, upper)"#,
},
script: r#"
from greptime import *
-median(values)"#,
- expect: Ok((
- ty: Float64,
- value: Float(1.25)
- ))
- ),
- TestCase(
- input: {
- "values": Var(
- ty: Float64,
- value: FloatVec([-1.0, 2.0, 2.0, 0.5])
- )
- },
- script: r#"
-from greptime import *
diff(values)"#,
expect: Ok((
ty: Float64,
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index fa8fe98e4cf1..79f60639d272 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -168,7 +168,7 @@ pub async fn from_output(
let mut result_map = HashMap::new();
let mut tmp = vec![&mut merge_map, &mut result_map];
- collect_plan_metrics(physical_plan, &mut tmp);
+ collect_plan_metrics(&physical_plan, &mut tmp);
let re = result_map
.into_iter()
.map(|(k, v)| (k, Value::from(v)))
diff --git a/src/servers/src/http/header.rs b/src/servers/src/http/header.rs
index b9beb54887ea..f9ff1485efd6 100644
--- a/src/servers/src/http/header.rs
+++ b/src/servers/src/http/header.rs
@@ -126,7 +126,7 @@ fn collect_into_maps(name: &str, value: u64, maps: &mut [&mut HashMap<String, u6
}
}
-pub fn collect_plan_metrics(plan: Arc<dyn ExecutionPlan>, maps: &mut [&mut HashMap<String, u64>]) {
+pub fn collect_plan_metrics(plan: &Arc<dyn ExecutionPlan>, maps: &mut [&mut HashMap<String, u64>]) {
if let Some(m) = plan.metrics() {
m.iter().for_each(|m| match m.value() {
MetricValue::Count { name, count } => {
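
A tiny sketch of the calling-convention change applied to `collect_plan_metrics` here (and to `children()` throughout this diff): taking `&Arc<T>` lets call sites pass a borrow instead of cloning the `Arc`, while the callee can still clone internally if it needs shared ownership. `Vec<u64>` stands in for the real execution plan type.

use std::sync::Arc;

fn inspect(plan: &Arc<Vec<u64>>) -> usize {
    // Read through the borrow; no reference-count traffic here.
    plan.len()
}

fn caller() {
    let plan = Arc::new(vec![1_u64, 2, 3]);
    let _rows = inspect(&plan); // previously: inspect(plan.clone())
}
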
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index 210e180a28b5..cb1588704700 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -460,7 +460,7 @@ async fn retrieve_series_from_query_result(
}?;
if let Some(ref plan) = result.meta.plan {
- collect_plan_metrics(plan.clone(), &mut [metrics]);
+ collect_plan_metrics(plan, &mut [metrics]);
}
Ok(())
}
@@ -486,7 +486,7 @@ async fn retrieve_labels_name_from_query_result(
.fail(),
}?;
if let Some(ref plan) = result.meta.plan {
- collect_plan_metrics(plan.clone(), &mut [metrics]);
+ collect_plan_metrics(plan, &mut [metrics]);
}
Ok(())
}
@@ -830,7 +830,7 @@ async fn retrieve_label_values(
}?;
if let Some(ref plan) = result.meta.plan {
- collect_plan_metrics(plan.clone(), &mut [metrics]);
+ collect_plan_metrics(plan, &mut [metrics]);
}
Ok(())
diff --git a/src/servers/src/http/prometheus_resp.rs b/src/servers/src/http/prometheus_resp.rs
index c4da273df5a3..a5a8bdba91da 100644
--- a/src/servers/src/http/prometheus_resp.rs
+++ b/src/servers/src/http/prometheus_resp.rs
@@ -132,7 +132,7 @@ impl PrometheusJsonResponse {
if let Some(physical_plan) = result.meta.plan {
let mut result_map = HashMap::new();
let mut tmp = vec![&mut result_map];
- collect_plan_metrics(physical_plan, &mut tmp);
+ collect_plan_metrics(&physical_plan, &mut tmp);
let re = result_map
.into_iter()
diff --git a/src/servers/src/prom_store.rs b/src/servers/src/prom_store.rs
index fc08b921a0a3..d548a843b59f 100644
--- a/src/servers/src/prom_store.rs
+++ b/src/servers/src/prom_store.rs
@@ -107,11 +107,11 @@ pub fn query_to_plan(dataframe: DataFrame, q: &Query) -> Result<LogicalPlan> {
}
// Case sensitive regexp match
MatcherType::Re => {
- conditions.push(regexp_match(col(name), lit(value)).is_not_null());
+ conditions.push(regexp_match(col(name), lit(value), None).is_not_null());
}
// Case sensitive regexp not match
MatcherType::Nre => {
- conditions.push(regexp_match(col(name), lit(value)).is_null());
+ conditions.push(regexp_match(col(name), lit(value), None).is_null());
}
}
}
diff --git a/src/servers/tests/py_script/mod.rs b/src/servers/tests/py_script/mod.rs
index e5682220e5c5..fe1ac75c8d6e 100644
--- a/src/servers/tests/py_script/mod.rs
+++ b/src/servers/tests/py_script/mod.rs
@@ -28,6 +28,7 @@ use table::test_util::MemTable;
use crate::create_testing_instance;
+#[ignore = "rust-python backend is not actively supported at present"]
#[tokio::test]
async fn test_insert_py_udf_and_query() -> Result<()> {
let catalog = "greptime";
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 8dc3f0c662c0..6cc2ff97ce53 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -19,7 +19,7 @@ use datafusion_common::ScalarValue;
use datatypes::arrow::datatypes::{DataType as ArrowDataType, IntervalUnit};
use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
-use sqlparser::ast::{ColumnOption, ColumnOptionDef, DataType, Expr};
+use sqlparser::ast::{ColumnOption, ColumnOptionDef, DataType, Expr, KeyOrIndexDisplay};
use sqlparser::dialect::keywords::Keyword;
use sqlparser::keywords::ALL_KEYWORDS;
use sqlparser::parser::IsOptional::Mandatory;
@@ -483,8 +483,11 @@ impl<'a> ParserContext<'a> {
value: column.name.value.clone(),
quote_style: None,
}],
- is_primary: false,
characteristics: None,
+ index_name: None,
+ index_type_display: KeyOrIndexDisplay::None,
+ index_type: None,
+ index_options: vec![],
};
constraints.push(constraint);
}
@@ -653,10 +656,12 @@ impl<'a> ParserContext<'a> {
.into_iter()
.map(Self::canonicalize_identifier)
.collect();
- Ok(Some(TableConstraint::Unique {
+ Ok(Some(TableConstraint::PrimaryKey {
name,
+ index_name: None,
+ index_type: None,
columns,
- is_primary: true,
+ index_options: vec![],
characteristics: None,
}))
}
@@ -696,8 +701,11 @@ impl<'a> ParserContext<'a> {
quote_style: None,
}),
columns,
- is_primary: false,
characteristics: None,
+ index_name: None,
+ index_type_display: KeyOrIndexDisplay::None,
+ index_type: None,
+ index_options: vec![],
}))
}
unexpected => {
@@ -741,7 +749,6 @@ fn validate_time_index(columns: &[ColumnDef], constraints: &[TableConstraint]) -
if let TableConstraint::Unique {
name: Some(ident),
columns,
- is_primary: false,
..
} = c
{
@@ -1035,20 +1042,11 @@ mod tests {
assert_column_def(&columns[3], "memory", "FLOAT64");
let constraints = &c.constraints;
- assert_matches!(
- &constraints[0],
- TableConstraint::Unique {
- is_primary: false,
- ..
- }
- );
- assert_matches!(
- &constraints[1],
- TableConstraint::Unique {
- is_primary: true,
+ assert!(matches!(&constraints[0], TableConstraint::Unique {
+ name: Some(name),
..
- }
- );
+ } if name.value == TIME_INDEX));
+ assert_matches!(&constraints[1], TableConstraint::PrimaryKey { .. });
}
_ => unreachable!(),
}
@@ -1354,16 +1352,10 @@ ENGINE=mito";
assert_eq!(c.constraints.len(), 2);
let tc = c.constraints[0].clone();
match tc {
- TableConstraint::Unique {
- name,
- columns,
- is_primary,
- ..
- } => {
+ TableConstraint::Unique { name, columns, .. } => {
assert_eq!(name.unwrap().to_string(), "__time_index");
assert_eq!(columns.len(), 1);
assert_eq!(&columns[0].value, "ts");
- assert!(!is_primary);
}
_ => panic!("should be time index constraint"),
};
@@ -1561,16 +1553,10 @@ ENGINE=mito";
if let Statement::CreateTable(c) = &result[0] {
let tc = c.constraints[0].clone();
match tc {
- TableConstraint::Unique {
- name,
- columns,
- is_primary,
- ..
- } => {
+ TableConstraint::Unique { name, columns, .. } => {
assert_eq!(name.unwrap().to_string(), "__time_index");
assert_eq!(columns.len(), 1);
assert_eq!(&columns[0].value, "ts");
- assert!(!is_primary);
}
_ => panic!("should be time index constraint"),
}
@@ -1677,20 +1663,11 @@ ENGINE=mito";
assert_column_def(&columns[3], "memory", "FLOAT64");
let constraints = &c.constraints;
- assert_matches!(
- &constraints[0],
- TableConstraint::Unique {
- is_primary: false,
+ assert!(matches!(&constraints[0], TableConstraint::Unique {
+ name: Some(name),
..
- }
- );
- assert_matches!(
- &constraints[1],
- TableConstraint::Unique {
- is_primary: true,
- ..
- }
- );
+ } if name.value == TIME_INDEX));
+ assert_matches!(&constraints[1], TableConstraint::PrimaryKey { .. });
assert_eq!(1, c.options.len());
assert_eq!(
[("ttl", "10s")].into_iter().collect::<HashMap<_, _>>(),
diff --git a/src/sql/src/parsers/utils.rs b/src/sql/src/parsers/utils.rs
index 97b317dd369a..7332d3e0e8d5 100644
--- a/src/sql/src/parsers/utils.rs
+++ b/src/sql/src/parsers/utils.rs
@@ -98,15 +98,15 @@ impl ContextProvider for StubContextProvider {
unimplemented!()
}
- fn udfs_names(&self) -> Vec<String> {
+ fn udf_names(&self) -> Vec<String> {
self.state.scalar_functions().keys().cloned().collect()
}
- fn udafs_names(&self) -> Vec<String> {
+ fn udaf_names(&self) -> Vec<String> {
self.state.aggregate_functions().keys().cloned().collect()
}
- fn udwfs_names(&self) -> Vec<String> {
+ fn udwf_names(&self) -> Vec<String> {
self.state.window_functions().keys().cloned().collect()
}
}
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 4259d61cc501..9bcf65c67578 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -72,7 +72,6 @@ pub const TIME_INDEX: &str = "__time_index";
pub fn is_time_index(constraint: &TableConstraint) -> bool {
matches!(constraint, TableConstraint::Unique {
name: Some(name),
- is_primary: false,
..
} if name.value == TIME_INDEX)
}
diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs
index c612ff3746cb..a68e0221ce69 100644
--- a/src/table/src/table/scan.rs
+++ b/src/table/src/table/scan.rs
@@ -84,7 +84,7 @@ impl ExecutionPlan for RegionScanExec {
&self.properties
}
- fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
vec![]
}
diff --git a/tests/cases/distributed/explain/join_10_tables.result b/tests/cases/distributed/explain/join_10_tables.result
index f44fc41fa79a..172d43aa1d79 100644
--- a/tests/cases/distributed/explain/join_10_tables.result
+++ b/tests/cases/distributed/explain/join_10_tables.result
@@ -75,7 +75,6 @@ limit 1;
+-+-+
| logical_plan_| Limit: skip=0, fetch=1_|
|_|_Sort: t_1.ts DESC NULLS FIRST, fetch=1_|
-|_|_Projection: t_1.ts, t_1.vin, t_1.val, t_2.ts, t_2.vin, t_2.val, t_3.ts, t_3.vin, t_3.val, t_4.ts, t_4.vin, t_4.val, t_5.ts, t_5.vin, t_5.val, t_6.ts, t_6.vin, t_6.val, t_7.ts, t_7.vin, t_7.val, t_8.ts, t_8.vin, t_8.val, t_9.ts, t_9.vin, t_9.val, t_10.ts, t_10.vin, t_10.val |
|_|_Inner Join: t_9.ts = t_10.ts, t_9.vin = t_10.vin_|
|_|_Inner Join: t_8.ts = t_9.ts, t_8.vin = t_9.vin_|
|_|_Inner Join: t_7.ts = t_8.ts, t_7.vin = t_8.vin_|
@@ -99,7 +98,7 @@ limit 1;
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | GlobalLimitExec: skip=0, fetch=1_|
|_|_SortPreservingMergeExec: [ts@0 DESC], fetch=1_|
-|_|_SortExec: TopK(fetch=1), expr=[ts@0 DESC]_|
+|_|_SortExec: TopK(fetch=1), expr=[ts@0 DESC], preserve_partitioning=[true]_|
|_|_CoalesceBatchesExec: target_batch_size=8192_|
|_|_REDACTED
|_|_CoalesceBatchesExec: target_batch_size=8192_|
diff --git a/tests/cases/distributed/explain/multi_partitions.result b/tests/cases/distributed/explain/multi_partitions.result
index 1053ede24beb..660a4eee3045 100644
--- a/tests/cases/distributed/explain/multi_partitions.result
+++ b/tests/cases/distributed/explain/multi_partitions.result
@@ -27,7 +27,7 @@ explain SELECT * FROM demo WHERE ts > cast(1000000000 as timestamp) ORDER BY hos
+-+-+
| logical_plan_| Sort: demo.host ASC NULLS LAST_|
|_|_MergeScan [is_placeholder=false]_|
-| physical_plan | SortExec: expr=[host@0 ASC NULLS LAST]_|
+| physical_plan | SortExec: expr=[host@0 ASC NULLS LAST], preserve_partitioning=[false]_|
|_|_MergeScanExec: REDACTED
|_|_|
+-+-+
diff --git a/tests/cases/distributed/explain/order_by.result b/tests/cases/distributed/explain/order_by.result
index 4d08b3e86195..221ac05fcd79 100644
--- a/tests/cases/distributed/explain/order_by.result
+++ b/tests/cases/distributed/explain/order_by.result
@@ -16,7 +16,7 @@ EXPLAIN SELECT DISTINCT i%2 FROM integers ORDER BY 1;
|_|_Aggregate: groupBy=[[integers.i % Int64(2)]], aggr=[[]]_|
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | SortPreservingMergeExec: [integers.i % Int64(2)@0 ASC NULLS LAST]_|
-|_|_SortExec: expr=[integers.i % Int64(2)@0 ASC NULLS LAST]_|
+|_|_SortExec: expr=[integers.i % Int64(2)@0 ASC NULLS LAST], preserve_partitioning=[true]_|
|_|_AggregateExec: mode=FinalPartitioned, gby=[integers.i % Int64(2)@0 as integers.i % Int64(2)], aggr=[] |
|_|_CoalesceBatchesExec: target_batch_size=8192_|
|_|_RepartitionExec: partitioning=REDACTED
@@ -63,7 +63,7 @@ EXPLAIN SELECT DISTINCT a, b FROM test ORDER BY a, b;
|_|_Aggregate: groupBy=[[test.a, test.b]], aggr=[[]]_|
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | SortPreservingMergeExec: [a@0 ASC NULLS LAST,b@1 ASC NULLS LAST]_|
-|_|_SortExec: expr=[a@0 ASC NULLS LAST,b@1 ASC NULLS LAST]_|
+|_|_SortExec: expr=[a@0 ASC NULLS LAST,b@1 ASC NULLS LAST], preserve_partitioning=[true] |
|_|_AggregateExec: mode=FinalPartitioned, gby=[a@0 as a, b@1 as b], aggr=[]_|
|_|_CoalesceBatchesExec: target_batch_size=8192_|
|_|_RepartitionExec: partitioning=REDACTED
diff --git a/tests/cases/distributed/explain/subqueries.result b/tests/cases/distributed/explain/subqueries.result
index ad31923c97fe..f0e41496a44c 100644
--- a/tests/cases/distributed/explain/subqueries.result
+++ b/tests/cases/distributed/explain/subqueries.result
@@ -13,14 +13,12 @@ EXPLAIN SELECT * FROM integers WHERE i IN ((SELECT i FROM integers)) ORDER BY i;
| plan_type_| plan_|
+-+-+
| logical_plan_| Sort: integers.i ASC NULLS LAST_|
-|_|_Projection: integers.i, integers.j_|
|_|_LeftSemi Join: integers.i = __correlated_sq_1.i_|
|_|_MergeScan [is_placeholder=false]_|
|_|_SubqueryAlias: __correlated_sq_1_|
-|_|_Projection: integers.i_|
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | SortPreservingMergeExec: [i@0 ASC NULLS LAST]_|
-|_|_SortExec: expr=[i@0 ASC NULLS LAST]_|
+|_|_SortExec: expr=[i@0 ASC NULLS LAST], preserve_partitioning=[true]_|
|_|_CoalesceBatchesExec: target_batch_size=8192_|
|_|_REDACTED
|_|_CoalesceBatchesExec: target_batch_size=8192_|
@@ -43,7 +41,6 @@ EXPLAIN SELECT * FROM integers i1 WHERE EXISTS(SELECT i FROM integers WHERE i=i1
| plan_type_| plan_|
+-+-+
| logical_plan_| Sort: i1.i ASC NULLS LAST_|
-|_|_Projection: i1.i, i1.j_|
|_|_LeftSemi Join: i1.i = __correlated_sq_1.i_|
|_|_SubqueryAlias: i1_|
|_|_MergeScan [is_placeholder=false]_|
@@ -51,7 +48,7 @@ EXPLAIN SELECT * FROM integers i1 WHERE EXISTS(SELECT i FROM integers WHERE i=i1
|_|_Projection: integers.i_|
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | SortPreservingMergeExec: [i@0 ASC NULLS LAST]_|
-|_|_SortExec: expr=[i@0 ASC NULLS LAST]_|
+|_|_SortExec: expr=[i@0 ASC NULLS LAST], preserve_partitioning=[true]_|
|_|_CoalesceBatchesExec: target_batch_size=8192_|
|_|_REDACTED
|_|_CoalesceBatchesExec: target_batch_size=8192_|
@@ -92,7 +89,7 @@ order by t.i desc;
|_|_Projection:_|
|_|_MergeScan [is_placeholder=false]_|
| physical_plan | SortPreservingMergeExec: [i@0 DESC]_|
-|_|_SortExec: expr=[i@0 DESC]_|
+|_|_SortExec: expr=[i@0 DESC], preserve_partitioning=[true]_|
|_|_CrossJoinExec_|
|_|_CoalescePartitionsExec_|
|_|_CoalesceBatchesExec: target_batch_size=8192_|
@@ -117,8 +114,7 @@ EXPLAIN INSERT INTO other SELECT i, 2 FROM integers WHERE i=(SELECT MAX(i) FROM
| | Projection: integers.i |
| | MergeScan [is_placeholder=false] |
| | SubqueryAlias: __scalar_sq_1 |
-| | Projection: MAX(integers.i) |
-| | MergeScan [is_placeholder=false] |
+| | MergeScan [is_placeholder=false] |
+--------------+-------------------------------------------------------------------+
drop table other;
diff --git a/tests/cases/distributed/optimizer/order_by.result b/tests/cases/distributed/optimizer/order_by.result
index 5c03ac107c16..c5c8b09adf00 100644
--- a/tests/cases/distributed/optimizer/order_by.result
+++ b/tests/cases/distributed/optimizer/order_by.result
@@ -12,50 +12,50 @@ explain select * from numbers;
-- SQLNESS REPLACE (peers.*) REDACTED
explain select * from numbers order by number desc;
-+---------------+-------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | SortExec: expr=[number@0 DESC] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+-------------------------------------------------------+
++---------------+---------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | SortExec: expr=[number@0 DESC], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+---------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
explain select * from numbers order by number asc;
-+---------------+-------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | SortExec: expr=[number@0 ASC NULLS LAST] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+-------------------------------------------------------+
++---------------+-------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | SortExec: expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+-------------------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
explain select * from numbers order by number desc limit 10;
-+---------------+---------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
-| | SortExec: TopK(fetch=10), expr=[number@0 DESC] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+---------------------------------------------------------+
++---------------+---------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
+| | SortExec: TopK(fetch=10), expr=[number@0 DESC], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+---------------------------------------------------------------------------------+
-- SQLNESS REPLACE (peers.*) REDACTED
explain select * from numbers order by number asc limit 10;
-+---------------+------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
-| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
+| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+-------------------------------------------------------------------------------------------+
diff --git a/tests/cases/standalone/common/aggregate/distinct_order_by.result b/tests/cases/standalone/common/aggregate/distinct_order_by.result
index 29028359e0f1..38efd531ef03 100644
--- a/tests/cases/standalone/common/aggregate/distinct_order_by.result
+++ b/tests/cases/standalone/common/aggregate/distinct_order_by.result
@@ -25,7 +25,7 @@ SELECT DISTINCT i%2 FROM integers ORDER BY 1;
-- +-----------------------+
SELECT DISTINCT i % 2 FROM integers WHERE i<3 ORDER BY i;
-Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: For SELECT DISTINCT, ORDER BY expressions i must appear in select list
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: For SELECT DISTINCT, ORDER BY expressions integers.i must appear in select list
SELECT DISTINCT ON (1) i % 2, i FROM integers WHERE i<3 ORDER BY i;
diff --git a/tests/cases/standalone/common/aggregate/sum.result b/tests/cases/standalone/common/aggregate/sum.result
index 4c54f1bef83c..5221e2776119 100644
--- a/tests/cases/standalone/common/aggregate/sum.result
+++ b/tests/cases/standalone/common/aggregate/sum.result
@@ -24,7 +24,11 @@ SELECT SUM(-1) FROM numbers;
SELECT SUM(-1) FROM numbers WHERE number=-1;
-Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value -1 to type UInt32
++----------------+
+| SUM(Int64(-1)) |
++----------------+
+| |
++----------------+
SELECT SUM(-1) FROM numbers WHERE number>10000 limit 1000;
diff --git a/tests/cases/standalone/common/range/calculate.result b/tests/cases/standalone/common/range/calculate.result
index 97d527968b52..50030a29ee85 100644
--- a/tests/cases/standalone/common/range/calculate.result
+++ b/tests/cases/standalone/common/range/calculate.result
@@ -23,18 +23,18 @@ Affected Rows: 10
-- Test range expr calculate
SELECT ts, host, covar(val::DOUBLE, val::DOUBLE) RANGE '20s' FROM host ALIGN '10s' ORDER BY host, ts;
-+---------------------+-------+------------------------------------+
-| ts | host | COVAR(host.val,host.val) RANGE 20s |
-+---------------------+-------+------------------------------------+
-| 1969-12-31T23:59:50 | host1 | |
-| 1970-01-01T00:00:00 | host1 | 0.5 |
-| 1970-01-01T00:00:10 | host1 | 0.5 |
-| 1970-01-01T00:00:20 | host1 | |
-| 1969-12-31T23:59:50 | host2 | |
-| 1970-01-01T00:00:00 | host2 | 0.5 |
-| 1970-01-01T00:00:10 | host2 | 0.5 |
-| 1970-01-01T00:00:20 | host2 | |
-+---------------------+-------+------------------------------------+
++---------------------+-------+-----------------------------------------+
+| ts | host | covar_samp(host.val,host.val) RANGE 20s |
++---------------------+-------+-----------------------------------------+
+| 1969-12-31T23:59:50 | host1 | |
+| 1970-01-01T00:00:00 | host1 | 0.5 |
+| 1970-01-01T00:00:10 | host1 | 0.5 |
+| 1970-01-01T00:00:20 | host1 | |
+| 1969-12-31T23:59:50 | host2 | |
+| 1970-01-01T00:00:00 | host2 | 0.5 |
+| 1970-01-01T00:00:10 | host2 | 0.5 |
+| 1970-01-01T00:00:20 | host2 | |
++---------------------+-------+-----------------------------------------+
SELECT ts, host, 2 * min(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
@@ -139,18 +139,18 @@ SELECT ts, host, (min(val) + max(val)) RANGE '20s' + 1.0 FROM host ALIGN '10s' O
SELECT ts, host, covar(ceil(val::DOUBLE), floor(val::DOUBLE)) RANGE '20s' FROM host ALIGN '10s' ORDER BY host, ts;
-+---------------------+-------+-------------------------------------------------+
-| ts | host | COVAR(ceil(host.val),floor(host.val)) RANGE 20s |
-+---------------------+-------+-------------------------------------------------+
-| 1969-12-31T23:59:50 | host1 | |
-| 1970-01-01T00:00:00 | host1 | 0.5 |
-| 1970-01-01T00:00:10 | host1 | 0.5 |
-| 1970-01-01T00:00:20 | host1 | |
-| 1969-12-31T23:59:50 | host2 | |
-| 1970-01-01T00:00:00 | host2 | 0.5 |
-| 1970-01-01T00:00:10 | host2 | 0.5 |
-| 1970-01-01T00:00:20 | host2 | |
-+---------------------+-------+-------------------------------------------------+
++---------------------+-------+------------------------------------------------------+
+| ts | host | covar_samp(ceil(host.val),floor(host.val)) RANGE 20s |
++---------------------+-------+------------------------------------------------------+
+| 1969-12-31T23:59:50 | host1 | |
+| 1970-01-01T00:00:00 | host1 | 0.5 |
+| 1970-01-01T00:00:10 | host1 | 0.5 |
+| 1970-01-01T00:00:20 | host1 | |
+| 1969-12-31T23:59:50 | host2 | |
+| 1970-01-01T00:00:00 | host2 | 0.5 |
+| 1970-01-01T00:00:10 | host2 | 0.5 |
+| 1970-01-01T00:00:20 | host2 | |
++---------------------+-------+------------------------------------------------------+
SELECT ts, host, floor(cos(ceil(sin(min(val) RANGE '5s')))) FROM host ALIGN '5s' ORDER BY host, ts;
diff --git a/tests/cases/standalone/common/range/error.result b/tests/cases/standalone/common/range/error.result
index ecdaac7d782e..020a9a7d4cc1 100644
--- a/tests/cases/standalone/common/range/error.result
+++ b/tests/cases/standalone/common/range/error.result
@@ -59,18 +59,18 @@ Error: 3000(PlanQuery), DataFusion error: Error during planning: Missing argumen
-- 2.3 type mismatch
SELECT covar(ceil(val), floor(val)) RANGE '20s' FROM host ALIGN '10s';
-+-------------------------------------------------+
-| COVAR(ceil(host.val),floor(host.val)) RANGE 20s |
-+-------------------------------------------------+
-| |
-| 0.5 |
-| 0.5 |
-| |
-| |
-| 0.5 |
-| 0.5 |
-| |
-+-------------------------------------------------+
++------------------------------------------------------+
+| covar_samp(ceil(host.val),floor(host.val)) RANGE 20s |
++------------------------------------------------------+
+| |
+| 0.5 |
+| 0.5 |
+| |
+| |
+| 0.5 |
+| 0.5 |
+| |
++------------------------------------------------------+
-- 2.4 nest query
SELECT min(max(val) RANGE '20s') RANGE '20s' FROM host ALIGN '10s';
diff --git a/tests/cases/standalone/common/range/special_aggr.result b/tests/cases/standalone/common/range/special_aggr.result
index 2240d744389d..449a5534c160 100644
--- a/tests/cases/standalone/common/range/special_aggr.result
+++ b/tests/cases/standalone/common/range/special_aggr.result
@@ -44,7 +44,7 @@ Affected Rows: 30
SELECT ts, host, first_value(val) RANGE '5s', last_value(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
+---------------------+-------+--------------------------------+-------------------------------+
-| ts | host | FIRST_VALUE(host.val) RANGE 5s | LAST_VALUE(host.val) RANGE 5s |
+| ts | host | first_value(host.val) RANGE 5s | last_value(host.val) RANGE 5s |
+---------------------+-------+--------------------------------+-------------------------------+
| 1970-01-01T00:00:00 | host1 | 0 | 2 |
| 1970-01-01T00:00:05 | host1 | | |
@@ -61,7 +61,7 @@ SELECT ts, host, first_value(val) RANGE '5s', last_value(val) RANGE '5s' FROM ho
SELECT ts, host, first_value(addon ORDER BY val DESC) RANGE '5s', last_value(addon ORDER BY val DESC) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
+---------------------+-------+-----------------------------------------------------------------------+----------------------------------------------------------------------+
-| ts | host | FIRST_VALUE(host.addon) ORDER BY [host.val DESC NULLS FIRST] RANGE 5s | LAST_VALUE(host.addon) ORDER BY [host.val DESC NULLS FIRST] RANGE 5s |
+| ts | host | first_value(host.addon) ORDER BY [host.val DESC NULLS FIRST] RANGE 5s | last_value(host.addon) ORDER BY [host.val DESC NULLS FIRST] RANGE 5s |
+---------------------+-------+-----------------------------------------------------------------------+----------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 3 | 1 |
| 1970-01-01T00:00:05 | host1 | 4 | 4 |
@@ -78,7 +78,7 @@ SELECT ts, host, first_value(addon ORDER BY val DESC) RANGE '5s', last_value(add
SELECT ts, host, first_value(addon ORDER BY val DESC NULLS LAST) RANGE '5s', last_value(addon ORDER BY val DESC NULLS LAST) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
+---------------------+-------+----------------------------------------------------------------------+---------------------------------------------------------------------+
-| ts | host | FIRST_VALUE(host.addon) ORDER BY [host.val DESC NULLS LAST] RANGE 5s | LAST_VALUE(host.addon) ORDER BY [host.val DESC NULLS LAST] RANGE 5s |
+| ts | host | first_value(host.addon) ORDER BY [host.val DESC NULLS LAST] RANGE 5s | last_value(host.addon) ORDER BY [host.val DESC NULLS LAST] RANGE 5s |
+---------------------+-------+----------------------------------------------------------------------+---------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 3 | 1 |
| 1970-01-01T00:00:05 | host1 | 4 | 4 |
@@ -95,7 +95,7 @@ SELECT ts, host, first_value(addon ORDER BY val DESC NULLS LAST) RANGE '5s', las
SELECT ts, host, first_value(addon ORDER BY val ASC) RANGE '5s', last_value(addon ORDER BY val ASC) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
+---------------------+-------+---------------------------------------------------------------------+--------------------------------------------------------------------+
-| ts | host | FIRST_VALUE(host.addon) ORDER BY [host.val ASC NULLS LAST] RANGE 5s | LAST_VALUE(host.addon) ORDER BY [host.val ASC NULLS LAST] RANGE 5s |
+| ts | host | first_value(host.addon) ORDER BY [host.val ASC NULLS LAST] RANGE 5s | last_value(host.addon) ORDER BY [host.val ASC NULLS LAST] RANGE 5s |
+---------------------+-------+---------------------------------------------------------------------+--------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 1 | 3 |
| 1970-01-01T00:00:05 | host1 | 4 | 4 |
@@ -112,7 +112,7 @@ SELECT ts, host, first_value(addon ORDER BY val ASC) RANGE '5s', last_value(addo
SELECT ts, host, first_value(addon ORDER BY val ASC NULLS FIRST) RANGE '5s', last_value(addon ORDER BY val ASC NULLS FIRST) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
+---------------------+-------+----------------------------------------------------------------------+---------------------------------------------------------------------+
-| ts | host | FIRST_VALUE(host.addon) ORDER BY [host.val ASC NULLS FIRST] RANGE 5s | LAST_VALUE(host.addon) ORDER BY [host.val ASC NULLS FIRST] RANGE 5s |
+| ts | host | first_value(host.addon) ORDER BY [host.val ASC NULLS FIRST] RANGE 5s | last_value(host.addon) ORDER BY [host.val ASC NULLS FIRST] RANGE 5s |
+---------------------+-------+----------------------------------------------------------------------+---------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 1 | 3 |
| 1970-01-01T00:00:05 | host1 | 4 | 4 |
@@ -129,7 +129,7 @@ SELECT ts, host, first_value(addon ORDER BY val ASC NULLS FIRST) RANGE '5s', las
SELECT ts, host, first_value(addon ORDER BY val ASC, ts ASC) RANGE '5s', last_value(addon ORDER BY val ASC, ts ASC) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
+---------------------+-------+---------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------+
-| ts | host | FIRST_VALUE(host.addon) ORDER BY [host.val ASC NULLS LAST, host.ts ASC NULLS LAST] RANGE 5s | LAST_VALUE(host.addon) ORDER BY [host.val ASC NULLS LAST, host.ts ASC NULLS LAST] RANGE 5s |
+| ts | host | first_value(host.addon) ORDER BY [host.val ASC NULLS LAST, host.ts ASC NULLS LAST] RANGE 5s | last_value(host.addon) ORDER BY [host.val ASC NULLS LAST, host.ts ASC NULLS LAST] RANGE 5s |
+---------------------+-------+---------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------+
| 1970-01-01T00:00:00 | host1 | 1 | 3 |
| 1970-01-01T00:00:05 | host1 | 4 | 6 |
@@ -231,9 +231,7 @@ SELECT ts, host, count(distinct *) RANGE '5s' FROM host ALIGN '5s' ORDER BY host
-- Test error first_value/last_value
SELECT ts, host, first_value(val, val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
-Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: No function matches the given name and argument types 'FIRST_VALUE(Int64, Int64)'. You might need to add explicit type casts.
- Candidate functions:
- FIRST_VALUE(Int8/Int16/Int32/Int64/UInt8/UInt16/UInt32/UInt64/Float32/Float64)
+Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Coercion from [Int64, Int64] to the signature OneOf([ArraySignature(Array), Uniform(1, [Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64])]) failed.
DROP TABLE host;
@@ -259,7 +257,7 @@ Affected Rows: 3
SELECT ts, first_value(val ORDER BY addon ASC) RANGE '5s', last_value(val ORDER BY addon ASC) RANGE '5s' FROM host ALIGN '5s';
+---------------------+---------------------------------------------------------------------+--------------------------------------------------------------------+
-| ts | FIRST_VALUE(host.val) ORDER BY [host.addon ASC NULLS LAST] RANGE 5s | LAST_VALUE(host.val) ORDER BY [host.addon ASC NULLS LAST] RANGE 5s |
+| ts | first_value(host.val) ORDER BY [host.addon ASC NULLS LAST] RANGE 5s | last_value(host.val) ORDER BY [host.addon ASC NULLS LAST] RANGE 5s |
+---------------------+---------------------------------------------------------------------+--------------------------------------------------------------------+
| 1970-01-01T00:00:00 | 2 | 0 |
+---------------------+---------------------------------------------------------------------+--------------------------------------------------------------------+
diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.result b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
index e8e388b916c0..e3bbc84e4257 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
@@ -21,12 +21,12 @@ TQL ANALYZE (0, 10, '5s') test;
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
-|_|_|_SortExec: expr=[k@2 ASC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SortPreservingMergeExec: [k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
-|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
@@ -50,12 +50,12 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[2000], interval=[1000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
-|_|_|_SortExec: expr=[k@2 ASC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SortPreservingMergeExec: [k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
-|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -2000 AND j@1 <= 12000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
@@ -78,12 +78,12 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
-|_|_|_SortExec: expr=[k@2 ASC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SortPreservingMergeExec: [k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
-|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
@@ -108,12 +108,12 @@ TQL ANALYZE VERBOSE (0, 10, '5s') test;
| 0_| 0_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j] REDACTED
|_|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] REDACTED
|_|_|_PromSeriesDivideExec: tags=["k"] REDACTED
-|_|_|_SortExec: expr=[k@2 ASC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_SortPreservingMergeExec: [k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
-|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] REDACTED
+|_|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST], preserve_partitioning=[true] REDACTED
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
diff --git a/tests/cases/standalone/common/tql-explain-analyze/explain.result b/tests/cases/standalone/common/tql-explain-analyze/explain.result
index a49624011d6c..c1b55e665c91 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/explain.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/explain.result
@@ -22,7 +22,7 @@ TQL EXPLAIN (0, 10, '5s') test;
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
| | PromSeriesDivideExec: tags=["k"] |
-| | SortExec: expr=[k@2 ASC NULLS LAST] |
+| | SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] |
| | MergeScanExec: REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------------------+
@@ -43,7 +43,7 @@ TQL EXPLAIN (0, 10, '1s', '2s') test;
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[2000], interval=[300000], time index=[j] |
| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
| | PromSeriesDivideExec: tags=["k"] |
-| | SortExec: expr=[k@2 ASC NULLS LAST] |
+| | SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] |
| | MergeScanExec: REDACTED
| | |
+---------------+---------------------------------------------------------------------------------------------+
@@ -63,7 +63,7 @@ TQL EXPLAIN ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
| | PromSeriesDivideExec: tags=["k"] |
-| | SortExec: expr=[k@2 ASC NULLS LAST] |
+| | SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false] |
| | MergeScanExec: REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------------------+
@@ -120,6 +120,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
| logical_plan after simplify_expressions_| SAME TEXT AS ABOVE_|
| logical_plan after unwrap_cast_in_comparison_| SAME TEXT AS ABOVE_|
| logical_plan after common_sub_expression_eliminate_| SAME TEXT AS ABOVE_|
+| logical_plan after eliminate_group_by_constant_| SAME TEXT AS ABOVE_|
| logical_plan after optimize_projections_| SAME TEXT AS ABOVE_|
| logical_plan after OrderHintRule_| SAME TEXT AS ABOVE_|
| logical_plan_| PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
@@ -151,31 +152,32 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
|_|_PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
-|_|_SortExec: expr=[k@2 ASC NULLS LAST]_|
+|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false]_|
|_|_MergeScanExec: REDACTED
|_|_|
+| physical_plan after OptimizeAggregateOrder_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
| physical_plan after coalesce_batches_| SAME TEXT AS ABOVE_|
| physical_plan after OutputRequirements_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
-|_|_SortExec: expr=[k@2 ASC NULLS LAST]_|
+|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false]_|
|_|_MergeScanExec: REDACTED
|_|_|
-| physical_plan after PipelineChecker_| SAME TEXT AS ABOVE_|
| physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_|
| physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
+| physical_plan after PipelineChecker_| SAME TEXT AS ABOVE_|
| physical_plan after RemoveDuplicateRule_| SAME TEXT AS ABOVE_|
| physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false]_|
|_|_PromSeriesDivideExec: tags=["k"]_|
-|_|_SortExec: expr=[k@2 ASC NULLS LAST]_|
+|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false]_|
|_|_MergeScanExec: REDACTED
|_|_|
| physical_plan_with_stats_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j], statistics=[Rows=Inexact(0), Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]] |
|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_PromSeriesDivideExec: tags=["k"], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
-|_|_SortExec: expr=[k@2 ASC NULLS LAST], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
+|_|_SortExec: expr=[k@2 ASC NULLS LAST], preserve_partitioning=[false], statistics=[Rows=Absent, Bytes=Absent, [(Col[0]:),(Col[1]:),(Col[2]:)]]_|
|_|_MergeScanExec: REDACTED
|_|_|
+-+-+
diff --git a/tests/cases/standalone/common/types/decimal/decimal_aggregates.result b/tests/cases/standalone/common/types/decimal/decimal_aggregates.result
index d246cc1d5947..e5d4fda8fc5e 100644
--- a/tests/cases/standalone/common/types/decimal/decimal_aggregates.result
+++ b/tests/cases/standalone/common/types/decimal/decimal_aggregates.result
@@ -3,9 +3,9 @@
SELECT arrow_typeof(FIRST_VALUE('0.1'::DECIMAL(4,1)));
+----------------------------------------+
-| arrow_typeof(FIRST_VALUE(Utf8("0.1"))) |
+| arrow_typeof(first_value(Utf8("0.1"))) |
+----------------------------------------+
-| Decimal128(4, 1) |
+| Float64 |
+----------------------------------------+
-- first_value
@@ -16,9 +16,9 @@ SELECT FIRST_VALUE(NULL::DECIMAL),
FIRST_VALUE('4567645908450368043562342564564938245.1'::DECIMAL(38,1))::VARCHAR;
+-------------------+--------------------------+--------------------------------+------------------------------------------+--------------------------------------------------------------+
-| FIRST_VALUE(NULL) | FIRST_VALUE(Utf8("0.1")) | FIRST_VALUE(Utf8("4938245.1")) | FIRST_VALUE(Utf8("45672564564938245.1")) | FIRST_VALUE(Utf8("4567645908450368043562342564564938245.1")) |
+| first_value(NULL) | first_value(Utf8("0.1")) | first_value(Utf8("4938245.1")) | first_value(Utf8("45672564564938245.1")) | first_value(Utf8("4567645908450368043562342564564938245.1")) |
+-------------------+--------------------------+--------------------------------+------------------------------------------+--------------------------------------------------------------+
-| | 0.1 | 4938245.1 | 45672564564938245.1 | 4567645908450368043562342564564938245.1 |
+| | 0.1 | 4938245.1 | 4.567256456493825e16 | 4.567645908450368e36 |
+-------------------+--------------------------+--------------------------------+------------------------------------------+--------------------------------------------------------------+
-- min
diff --git a/tests/cases/standalone/optimizer/order_by.result b/tests/cases/standalone/optimizer/order_by.result
index 49996d130e78..b937344b7700 100644
--- a/tests/cases/standalone/optimizer/order_by.result
+++ b/tests/cases/standalone/optimizer/order_by.result
@@ -10,47 +10,47 @@ explain select * from numbers;
explain select * from numbers order by number desc;
-+---------------+-------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | SortExec: expr=[number@0 DESC] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+-------------------------------------------------------+
++---------------+---------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | SortExec: expr=[number@0 DESC], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+---------------------------------------------------------------+
explain select * from numbers order by number asc;
-+---------------+-------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | SortExec: expr=[number@0 ASC NULLS LAST] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+-------------------------------------------------------+
++---------------+-------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | SortExec: expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+-------------------------------------------------------------------------+
explain select * from numbers order by number desc limit 10;
-+---------------+---------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
-| | SortExec: TopK(fetch=10), expr=[number@0 DESC] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+---------------------------------------------------------+
++---------------+---------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
+| | SortExec: TopK(fetch=10), expr=[number@0 DESC], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+---------------------------------------------------------------------------------+
explain select * from numbers order by number asc limit 10;
-+---------------+------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------+
-| logical_plan | MergeScan [is_placeholder=false] |
-| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
-| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST] |
-| | SinglePartitionScanner: <SendableRecordBatchStream> |
-| | |
-+---------------+------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------+
+| logical_plan | MergeScan [is_placeholder=false] |
+| physical_plan | GlobalLimitExec: skip=0, fetch=10 |
+| | SortExec: TopK(fetch=10), expr=[number@0 ASC NULLS LAST], preserve_partitioning=[false] |
+| | SinglePartitionScanner: <SendableRecordBatchStream> |
+| | |
++---------------+-------------------------------------------------------------------------------------------+

type: build
masked_commit_message: bump datafusion 20240528 (#4061)

hash: bc9a46dbb70f0b986c156173f9801a1e86d689a9
date: 2022-12-26 10:44:12
author: Mike Yang
commit_message: feat: support varbinary (#767)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index fb9a777a9ce5..ef8adf7e1b91 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6684,10 +6684,12 @@ version = "0.1.0"
dependencies = [
"api",
"catalog",
+ "common-base",
"common-catalog",
"common-error",
"common-time",
"datatypes",
+ "hex",
"itertools",
"mito",
"once_cell",
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index ab44a1c4f772..e3dd573f7b63 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -7,10 +7,12 @@ license.workspace = true
[dependencies]
api = { path = "../api" }
catalog = { path = "../catalog" }
+common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
+hex = "0.4"
itertools = "0.10"
mito = { path = "../mito" }
once_cell = "1.10"
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index afcd0eb1bc76..bbdc6fc5c60d 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -24,6 +24,7 @@ pub mod statement;
use std::str::FromStr;
use api::helper::ColumnDataTypeWrapper;
+use common_base::bytes::Bytes;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_time::Timestamp;
use datatypes::data_type::DataType;
@@ -127,6 +128,26 @@ fn parse_string_to_value(
}
}
+fn parse_hex_string(s: &str) -> Result<Value> {
+ match hex::decode(s) {
+ Ok(b) => Ok(Value::Binary(Bytes::from(b))),
+ Err(hex::FromHexError::InvalidHexCharacter { c, index }) => ParseSqlValueSnafu {
+ msg: format!(
+ "Fail to parse hex string to Byte: invalid character {c:?} at position {index}"
+ ),
+ }
+ .fail(),
+ Err(hex::FromHexError::OddLength) => ParseSqlValueSnafu {
+ msg: "Fail to parse hex string to Byte: odd number of digits".to_string(),
+ }
+ .fail(),
+ Err(e) => ParseSqlValueSnafu {
+ msg: format!("Fail to parse hex string to Byte {s}, {e:?}"),
+ }
+ .fail(),
+ }
+}
+
macro_rules! parse_number_to_value {
($data_type: expr, $n: ident, $(($Type: ident, $PrimitiveType: ident)), +) => {
match $data_type {
@@ -200,6 +221,7 @@ pub fn sql_value_to_value(
SqlValue::DoubleQuotedString(s) | SqlValue::SingleQuotedString(s) => {
parse_string_to_value(column_name, s.to_owned(), data_type)?
}
+ SqlValue::HexStringLiteral(s) => parse_hex_string(s)?,
_ => todo!("Other sql value"),
})
}
@@ -299,6 +321,7 @@ pub fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<Co
SqlDataType::Double => Ok(ConcreteDataType::float64_datatype()),
SqlDataType::Boolean => Ok(ConcreteDataType::boolean_datatype()),
SqlDataType::Date => Ok(ConcreteDataType::date_datatype()),
+ SqlDataType::Varbinary(_) => Ok(ConcreteDataType::binary_datatype()),
SqlDataType::Custom(obj_name, _) => match &obj_name.0[..] {
[type_name] => {
if type_name
@@ -379,6 +402,10 @@ mod tests {
SqlDataType::Timestamp(None, TimezoneInfo::None),
ConcreteDataType::timestamp_millisecond_datatype(),
);
+ check_type(
+ SqlDataType::Varbinary(None),
+ ConcreteDataType::binary_datatype(),
+ );
}
#[test]
@@ -428,6 +455,23 @@ mod tests {
),
"v is {v:?}",
);
+
+ let sql_val = SqlValue::HexStringLiteral("48656c6c6f20776f726c6421".to_string());
+ let v = sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val).unwrap();
+ assert_eq!(Value::Binary(Bytes::from(b"Hello world!".as_slice())), v);
+
+ let sql_val = SqlValue::HexStringLiteral("9AF".to_string());
+ let v = sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val);
+ assert!(v.is_err());
+ assert!(
+ format!("{v:?}").contains("odd number of digits"),
+ "v is {v:?}"
+ );
+
+ let sql_val = SqlValue::HexStringLiteral("AG".to_string());
+ let v = sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val);
+ assert!(v.is_err());
+ assert!(format!("{v:?}").contains("invalid character"), "v is {v:?}",);
}
#[test]

type: feat
masked_commit_message: support varbinary (#767)

hash: 316d8434829929b7e47fbe525b8b2c4e38360f3b
date: 2024-01-08 16:24:27
author: tison
commit_message: feat: support CSV format in sql HTTP API (#3062)
is_merge: false
git_diff:
diff --git a/src/client/src/error.rs b/src/client/src/error.rs
index ae573d037de3..cac8ebb0b513 100644
--- a/src/client/src/error.rs
+++ b/src/client/src/error.rs
@@ -16,7 +16,7 @@ use std::any::Any;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
-use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
+use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tonic::{Code, Status};
@@ -115,7 +115,7 @@ impl From<Status> for Error {
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}
- let code = get_metadata_value(&e, GREPTIME_ERROR_CODE)
+ let code = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_CODE)
.and_then(|s| {
if let Ok(code) = s.parse::<u32>() {
StatusCode::from_u32(code)
@@ -125,8 +125,8 @@ impl From<Status> for Error {
})
.unwrap_or(StatusCode::Unknown);
- let msg =
- get_metadata_value(&e, GREPTIME_ERROR_MSG).unwrap_or_else(|| e.message().to_string());
+ let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
+ .unwrap_or_else(|| e.message().to_string());
Self::Server { code, msg }
}
diff --git a/src/common/error/src/lib.rs b/src/common/error/src/lib.rs
index 9fd659fc65a6..aa3c915e84e3 100644
--- a/src/common/error/src/lib.rs
+++ b/src/common/error/src/lib.rs
@@ -19,7 +19,7 @@ pub mod format;
pub mod mock;
pub mod status_code;
-pub const GREPTIME_ERROR_CODE: &str = "x-greptime-err-code";
-pub const GREPTIME_ERROR_MSG: &str = "x-greptime-err-msg";
+pub const GREPTIME_DB_HEADER_ERROR_CODE: &str = "x-greptime-err-code";
+pub const GREPTIME_DB_HEADER_ERROR_MSG: &str = "x-greptime-err-msg";
pub use snafu;
diff --git a/src/meta-client/src/error.rs b/src/meta-client/src/error.rs
index a86ec822802f..fcb6e82ccf73 100644
--- a/src/meta-client/src/error.rs
+++ b/src/meta-client/src/error.rs
@@ -14,7 +14,7 @@
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
-use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
+use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use tonic::Status;
@@ -117,7 +117,7 @@ impl From<Status> for Error {
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
}
- let code = get_metadata_value(&e, GREPTIME_ERROR_CODE)
+ let code = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_CODE)
.and_then(|s| {
if let Ok(code) = s.parse::<u32>() {
StatusCode::from_u32(code)
@@ -127,8 +127,8 @@ impl From<Status> for Error {
})
.unwrap_or(StatusCode::Internal);
- let msg =
- get_metadata_value(&e, GREPTIME_ERROR_MSG).unwrap_or_else(|| e.message().to_string());
+ let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
+ .unwrap_or_else(|| e.message().to_string());
Self::MetaServer { code, msg }
}
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 70b4401c9a73..5be38cee15fe 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -569,7 +569,7 @@ macro_rules! define_into_tonic_status {
($Error: ty) => {
impl From<$Error> for tonic::Status {
fn from(err: $Error) -> Self {
- use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
+ use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use tonic::codegen::http::{HeaderMap, HeaderValue};
use tonic::metadata::MetadataMap;
@@ -578,11 +578,14 @@ macro_rules! define_into_tonic_status {
// If either of the status_code or error msg cannot convert to valid HTTP header value
// (which is a very rare case), just ignore. Client will use Tonic status code and message.
let status_code = err.status_code();
- headers.insert(GREPTIME_ERROR_CODE, HeaderValue::from(status_code as u32));
+ headers.insert(
+ GREPTIME_DB_HEADER_ERROR_CODE,
+ HeaderValue::from(status_code as u32),
+ );
let root_error = err.output_msg();
if let Ok(err_msg) = HeaderValue::from_bytes(root_error.as_bytes()) {
- let _ = headers.insert(GREPTIME_ERROR_MSG, err_msg);
+ let _ = headers.insert(GREPTIME_DB_HEADER_ERROR_MSG, err_msg);
}
let metadata = MetadataMap::from_headers(headers);
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index a390f406d778..1507834d816c 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -12,43 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub mod authorize;
-pub mod handler;
-pub mod header;
-pub mod influxdb;
-pub mod mem_prof;
-pub mod opentsdb;
-pub mod otlp;
-pub mod pprof;
-pub mod prom_store;
-pub mod prometheus;
-pub mod script;
-
-#[cfg(feature = "dashboard")]
-mod dashboard;
-pub mod influxdb_result_v1;
-
use std::fmt::Display;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
use aide::axum::{routing as apirouting, ApiRouter, IntoApiResponse};
use aide::openapi::{Info, OpenApi, Server as OpenAPIServer};
+use aide::OperationOutput;
use async_trait::async_trait;
use auth::UserProviderRef;
use axum::error_handling::HandleErrorLayer;
use axum::extract::{DefaultBodyLimit, MatchedPath};
use axum::http::Request;
use axum::middleware::{self, Next};
-use axum::response::{Html, IntoResponse, Json};
+use axum::response::{Html, IntoResponse, Json, Response};
use axum::{routing, BoxError, Extension, Router};
use common_base::readable_size::ReadableSize;
use common_base::Plugins;
-use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
-use common_query::Output;
-use common_recordbatch::{util, RecordBatch};
-use common_telemetry::logging::{debug, error, info};
+use common_recordbatch::RecordBatch;
+use common_telemetry::logging::{error, info};
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datatypes::data_type::DataType;
@@ -66,6 +49,9 @@ use tower_http::trace::TraceLayer;
use self::authorize::AuthState;
use crate::configurator::ConfiguratorRef;
use crate::error::{AlreadyStartedSnafu, Error, Result, StartHttpSnafu, ToJsonSnafu};
+use crate::http::csv_result::CsvResponse;
+use crate::http::error_result::ErrorResponse;
+use crate::http::greptime_result_v1::GreptimedbV1Response;
use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
use crate::http::influxdb_result_v1::InfluxdbV1Response;
use crate::http::prometheus::{
@@ -84,6 +70,25 @@ use crate::query_handler::{
};
use crate::server::Server;
+pub mod authorize;
+pub mod handler;
+pub mod header;
+pub mod influxdb;
+pub mod mem_prof;
+pub mod opentsdb;
+pub mod otlp;
+pub mod pprof;
+pub mod prom_store;
+pub mod prometheus;
+pub mod script;
+
+pub mod csv_result;
+#[cfg(feature = "dashboard")]
+mod dashboard;
+pub mod error_result;
+pub mod greptime_result_v1;
+pub mod influxdb_result_v1;
+
pub const HTTP_API_VERSION: &str = "v1";
pub const HTTP_API_PREFIX: &str = "/v1/";
/// Default http body limit (64M).
@@ -239,129 +244,16 @@ impl TryFrom<Vec<RecordBatch>> for HttpRecordsOutput {
#[derive(Serialize, Deserialize, Debug, JsonSchema, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
-pub enum JsonOutput {
+pub enum GreptimeQueryOutput {
AffectedRows(usize),
Records(HttpRecordsOutput),
}
-#[derive(Serialize, Deserialize, Debug, JsonSchema)]
-pub struct GreptimedbV1Response {
- code: u32,
- #[serde(skip_serializing_if = "Option::is_none")]
- error: Option<String>,
- #[serde(skip_serializing_if = "Vec::is_empty", default)]
- output: Vec<JsonOutput>,
- #[serde(skip_serializing_if = "Option::is_none")]
- execution_time_ms: Option<u64>,
-}
-
-impl GreptimedbV1Response {
- pub fn with_error(error: impl ErrorExt) -> Self {
- let code = error.status_code();
- if code.should_log_error() {
- error!(error; "Failed to handle HTTP request");
- } else {
- debug!("Failed to handle HTTP request, err: {:?}", error);
- }
-
- GreptimedbV1Response {
- error: Some(error.output_msg()),
- code: code as u32,
- output: vec![],
- execution_time_ms: None,
- }
- }
-
- fn with_error_message(err_msg: String, error_code: StatusCode) -> Self {
- GreptimedbV1Response {
- error: Some(err_msg),
- code: error_code as u32,
- output: vec![],
- execution_time_ms: None,
- }
- }
-
- fn with_output(output: Vec<JsonOutput>) -> Self {
- GreptimedbV1Response {
- error: None,
- code: StatusCode::Success as u32,
- output,
- execution_time_ms: None,
- }
- }
-
- fn with_execution_time(&mut self, execution_time: u64) {
- self.execution_time_ms = Some(execution_time);
- }
-
- /// Create a json response from query result
- pub async fn from_output(outputs: Vec<Result<Output>>) -> Self {
- // TODO(sunng87): this api response structure cannot represent error
- // well. It hides successful execution results from error response
- let mut results = Vec::with_capacity(outputs.len());
- for out in outputs {
- match out {
- Ok(Output::AffectedRows(rows)) => {
- results.push(JsonOutput::AffectedRows(rows));
- }
- Ok(Output::Stream(stream)) => {
- // TODO(sunng87): streaming response
- match util::collect(stream).await {
- Ok(rows) => match HttpRecordsOutput::try_from(rows) {
- Ok(rows) => {
- results.push(JsonOutput::Records(rows));
- }
- Err(err) => {
- return Self::with_error(err);
- }
- },
-
- Err(e) => {
- return Self::with_error(e);
- }
- }
- }
- Ok(Output::RecordBatches(rbs)) => match HttpRecordsOutput::try_from(rbs.take()) {
- Ok(rows) => {
- results.push(JsonOutput::Records(rows));
- }
- Err(err) => {
- return Self::with_error(err);
- }
- },
- Err(e) => {
- return Self::with_error(e);
- }
- }
- }
- Self::with_output(results)
- }
-
- pub fn code(&self) -> u32 {
- self.code
- }
-
- pub fn success(&self) -> bool {
- self.code == (StatusCode::Success as u32)
- }
-
- pub fn error(&self) -> Option<&String> {
- self.error.as_ref()
- }
-
- pub fn output(&self) -> &[JsonOutput] {
- &self.output
- }
-
- pub fn execution_time_ms(&self) -> Option<u64> {
- self.execution_time_ms
- }
-}
-
/// It allows the results of SQL queries to be presented in different formats.
-/// Currently, `greptimedb_v1` and `influxdb_v1` are supported.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResponseFormat {
+ Csv,
+ #[default]
GreptimedbV1,
InfluxdbV1,
}
@@ -369,11 +261,20 @@ pub enum ResponseFormat {
impl ResponseFormat {
pub fn parse(s: &str) -> Option<Self> {
match s {
+ "csv" => Some(ResponseFormat::Csv),
"greptimedb_v1" => Some(ResponseFormat::GreptimedbV1),
"influxdb_v1" => Some(ResponseFormat::InfluxdbV1),
_ => None,
}
}
+
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ ResponseFormat::Csv => "csv",
+ ResponseFormat::GreptimedbV1 => "greptimedb_v1",
+ ResponseFormat::InfluxdbV1 => "influxdb_v1",
+ }
+ }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -421,67 +322,60 @@ impl Display for Epoch {
}
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
-#[serde(tag = "type")]
-pub enum JsonResponse {
+pub enum HttpResponse {
+ Csv(CsvResponse),
+ Error(ErrorResponse),
GreptimedbV1(GreptimedbV1Response),
InfluxdbV1(InfluxdbV1Response),
}
-impl From<GreptimedbV1Response> for JsonResponse {
- fn from(value: GreptimedbV1Response) -> Self {
- JsonResponse::GreptimedbV1(value)
+impl HttpResponse {
+ pub fn with_execution_time(self, execution_time: u64) -> Self {
+ match self {
+ HttpResponse::Csv(resp) => resp.with_execution_time(execution_time).into(),
+ HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
+ HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
+ HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
+ }
}
}
-impl From<InfluxdbV1Response> for JsonResponse {
- fn from(value: InfluxdbV1Response) -> Self {
- JsonResponse::InfluxdbV1(value)
+impl IntoResponse for HttpResponse {
+ fn into_response(self) -> Response {
+ match self {
+ HttpResponse::Csv(resp) => resp.into_response(),
+ HttpResponse::GreptimedbV1(resp) => resp.into_response(),
+ HttpResponse::InfluxdbV1(resp) => resp.into_response(),
+ HttpResponse::Error(resp) => resp.into_response(),
+ }
}
}
-impl JsonResponse {
- pub fn with_error(error: impl ErrorExt, response_format: ResponseFormat) -> Self {
- match response_format {
- ResponseFormat::GreptimedbV1 => GreptimedbV1Response::with_error(error).into(),
- ResponseFormat::InfluxdbV1 => InfluxdbV1Response::with_error(error).into(),
- }
+impl OperationOutput for HttpResponse {
+ type Inner = Response;
+}
+
+impl From<CsvResponse> for HttpResponse {
+ fn from(value: CsvResponse) -> Self {
+ HttpResponse::Csv(value)
}
+}
- pub fn with_error_message(
- err_msg: String,
- error_code: StatusCode,
- response_format: ResponseFormat,
- ) -> Self {
- match response_format {
- ResponseFormat::GreptimedbV1 => {
- GreptimedbV1Response::with_error_message(err_msg, error_code).into()
- }
- ResponseFormat::InfluxdbV1 => InfluxdbV1Response::with_error_message(err_msg).into(),
- }
+impl From<ErrorResponse> for HttpResponse {
+ fn from(value: ErrorResponse) -> Self {
+ HttpResponse::Error(value)
}
- pub async fn from_output(
- outputs: Vec<Result<Output>>,
- response_format: ResponseFormat,
- epoch: Option<Epoch>,
- ) -> Self {
- match response_format {
- ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await.into(),
- ResponseFormat::InfluxdbV1 => {
- InfluxdbV1Response::from_output(outputs, epoch).await.into()
- }
- }
+}
+
+impl From<GreptimedbV1Response> for HttpResponse {
+ fn from(value: GreptimedbV1Response) -> Self {
+ HttpResponse::GreptimedbV1(value)
}
+}
- fn with_execution_time(mut self, execution_time: u128) -> Self {
- match &mut self {
- JsonResponse::GreptimedbV1(resp) => {
- resp.with_execution_time(execution_time as u64);
- }
- JsonResponse::InfluxdbV1(resp) => {
- resp.with_execution_time(execution_time as u64);
- }
- }
- self
+impl From<InfluxdbV1Response> for HttpResponse {
+ fn from(value: InfluxdbV1Response) -> Self {
+ HttpResponse::InfluxdbV1(value)
}
}
@@ -900,14 +794,13 @@ impl Server for HttpServer {
}
/// handle error middleware
-async fn handle_error(err: BoxError) -> Json<JsonResponse> {
+async fn handle_error(err: BoxError) -> Json<HttpResponse> {
error!(err; "Unhandled internal error");
-
- Json(JsonResponse::with_error_message(
- format!("Unhandled internal error: {err}"),
- StatusCode::Unexpected,
+ Json(HttpResponse::Error(ErrorResponse::from_error_message(
ResponseFormat::GreptimedbV1,
- ))
+ StatusCode::Unexpected,
+ format!("Unhandled internal error: {err}"),
+ )))
}
#[cfg(test)]
@@ -920,6 +813,7 @@ mod test {
use axum::http::StatusCode;
use axum::routing::get;
use axum_test_helper::TestClient;
+ use common_query::Output;
use common_recordbatch::RecordBatches;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
@@ -1051,20 +945,24 @@ mod test {
];
let recordbatch = RecordBatch::new(schema.clone(), columns).unwrap();
- for format in [ResponseFormat::GreptimedbV1, ResponseFormat::InfluxdbV1] {
+ for format in [
+ ResponseFormat::GreptimedbV1,
+ ResponseFormat::InfluxdbV1,
+ ResponseFormat::Csv,
+ ] {
let recordbatches =
RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
- let json_resp = JsonResponse::from_output(
- vec![Ok(Output::RecordBatches(recordbatches))],
- format,
- None,
- )
- .await;
+ let outputs = vec![Ok(Output::RecordBatches(recordbatches))];
+ let json_resp = match format {
+ ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
+ ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
+ ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
+ };
match json_resp {
- JsonResponse::GreptimedbV1(json_resp) => {
- let json_output = &json_resp.output[0];
- if let JsonOutput::Records(r) = json_output {
+ HttpResponse::GreptimedbV1(resp) => {
+ let json_output = &resp.output[0];
+ if let GreptimeQueryOutput::Records(r) = json_output {
assert_eq!(r.num_rows(), 4);
assert_eq!(r.num_cols(), 2);
let schema = r.schema.as_ref().unwrap();
@@ -1076,8 +974,8 @@ mod test {
panic!("invalid output type");
}
}
- JsonResponse::InfluxdbV1(json_resp) => {
- let json_output = &json_resp.results()[0];
+ HttpResponse::InfluxdbV1(resp) => {
+ let json_output = &resp.results()[0];
assert_eq!(json_output.num_rows(), 4);
assert_eq!(json_output.num_cols(), 2);
assert_eq!(json_output.series[0].columns.clone()[0], "numbers");
@@ -1087,6 +985,21 @@ mod test {
);
assert_eq!(json_output.series[0].values[0][1], serde_json::Value::Null);
}
+ HttpResponse::Csv(resp) => {
+ let output = &resp.output()[0];
+ if let GreptimeQueryOutput::Records(r) = output {
+ assert_eq!(r.num_rows(), 4);
+ assert_eq!(r.num_cols(), 2);
+ let schema = r.schema.as_ref().unwrap();
+ assert_eq!(schema.column_schemas[0].name, "numbers");
+ assert_eq!(schema.column_schemas[0].data_type, "UInt32");
+ assert_eq!(r.rows[0][0], serde_json::Value::from(1));
+ assert_eq!(r.rows[0][1], serde_json::Value::Null);
+ } else {
+ panic!("invalid output type");
+ }
+ }
+ HttpResponse::Error(err) => unreachable!("{err:?}"),
}
}
}
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index 51040ba4899a..0d5908ea6b7c 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -17,7 +17,6 @@ use axum::extract::State;
use axum::http::{self, Request, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
-use axum::Json;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
@@ -30,11 +29,12 @@ use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use super::header::GreptimeDbName;
-use super::{JsonResponse, ResponseFormat, PUBLIC_APIS};
+use super::{ResponseFormat, PUBLIC_APIS};
use crate::error::{
self, InvalidAuthorizationHeaderSnafu, InvalidParameterSnafu, InvisibleASCIISnafu,
NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu, UrlDecodeSnafu,
};
+use crate::http::error_result::ErrorResponse;
use crate::http::HTTP_API_PREFIX;
/// AuthState is a holder state for [`UserProviderRef`]
@@ -118,14 +118,12 @@ pub async fn check_http_auth<B>(
}
fn err_response(is_influxdb: bool, err: impl ErrorExt) -> impl IntoResponse {
- let format = if is_influxdb {
+ let ty = if is_influxdb {
ResponseFormat::InfluxdbV1
} else {
ResponseFormat::GreptimedbV1
};
-
- let body = JsonResponse::with_error(err, format);
- (StatusCode::UNAUTHORIZED, Json(body))
+ (StatusCode::UNAUTHORIZED, ErrorResponse::from_error(ty, err))
}
fn extract_catalog_and_schema<B>(request: &Request<B>) -> (&str, &str) {
diff --git a/src/servers/src/http/csv_result.rs b/src/servers/src/http/csv_result.rs
new file mode 100644
index 000000000000..7c26d055da77
--- /dev/null
+++ b/src/servers/src/http/csv_result.rs
@@ -0,0 +1,111 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::Write;
+
+use axum::http::{header, HeaderValue};
+use axum::response::{IntoResponse, Response};
+use common_error::status_code::StatusCode;
+use common_query::Output;
+use itertools::Itertools;
+use mime_guess::mime;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+use crate::http::error_result::ErrorResponse;
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat};
+
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct CsvResponse {
+ output: Vec<GreptimeQueryOutput>,
+ execution_time_ms: u64,
+}
+
+impl CsvResponse {
+ pub async fn from_output(outputs: Vec<crate::error::Result<Output>>) -> HttpResponse {
+ match handler::from_output(ResponseFormat::Csv, outputs).await {
+ Err(err) => HttpResponse::Error(err),
+ Ok(output) => {
+ if output.len() > 1 {
+ HttpResponse::Error(ErrorResponse::from_error_message(
+ ResponseFormat::Csv,
+ StatusCode::InvalidArguments,
+ "Multi-statements are not allowed".to_string(),
+ ))
+ } else {
+ HttpResponse::Csv(CsvResponse {
+ output,
+ execution_time_ms: 0,
+ })
+ }
+ }
+ }
+ }
+
+ pub fn output(&self) -> &[GreptimeQueryOutput] {
+ &self.output
+ }
+
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
+ }
+
+ pub fn execution_time_ms(&self) -> u64 {
+ self.execution_time_ms
+ }
+}
+
+impl IntoResponse for CsvResponse {
+ fn into_response(mut self) -> Response {
+ debug_assert!(
+ self.output.len() <= 1,
+ "self.output has extra elements: {}",
+ self.output.len()
+ );
+
+ let execution_time = self.execution_time_ms;
+ let payload = match self.output.pop() {
+ None => "".to_string(),
+ Some(GreptimeQueryOutput::AffectedRows(n)) => {
+ format!("{n}\n")
+ }
+ Some(GreptimeQueryOutput::Records(records)) => {
+ let mut result = String::new();
+ for row in records.rows {
+ let row = row.iter().map(|v| v.to_string()).join(",");
+ writeln!(result, "{row}").unwrap();
+ }
+ result
+ }
+ };
+
+ let mut resp = (
+ [(
+ header::CONTENT_TYPE,
+ HeaderValue::from_static(mime::TEXT_CSV_UTF_8.as_ref()),
+ )],
+ payload,
+ )
+ .into_response();
+ resp.headers_mut()
+ .insert(GREPTIME_DB_HEADER_FORMAT, HeaderValue::from_static("CSV"));
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_EXECUTION_TIME,
+ HeaderValue::from(execution_time),
+ );
+ resp
+ }
+}
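For reference, a minimal standalone sketch of the row-serialization loop introduced above (not part of the commit; it assumes the serde_json and itertools crates, and the function name rows_to_csv is illustrative):

use std::fmt::Write;

use itertools::Itertools;
use serde_json::Value;

// Join each row of JSON values into one comma-separated line, mirroring
// the loop in `CsvResponse::into_response` above.
fn rows_to_csv(rows: &[Vec<Value>]) -> String {
    let mut out = String::new();
    for row in rows {
        let line = row.iter().map(|v| v.to_string()).join(",");
        // Writing into a String cannot fail, so unwrap is fine here.
        writeln!(out, "{line}").unwrap();
    }
    out
}

As in the diff above, string values keep their JSON quoting and escaping, since `Value::to_string` renders the JSON form of each cell.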
diff --git a/src/servers/src/http/error_result.rs b/src/servers/src/http/error_result.rs
new file mode 100644
index 000000000000..629594e66456
--- /dev/null
+++ b/src/servers/src/http/error_result.rs
@@ -0,0 +1,98 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use axum::http::HeaderValue;
+use axum::response::{IntoResponse, Response};
+use axum::Json;
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
+use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
+use common_telemetry::logging::{debug, error};
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+use crate::http::ResponseFormat;
+
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct ErrorResponse {
+ #[serde(skip)]
+ ty: ResponseFormat,
+ code: u32,
+ error: String,
+ execution_time_ms: u64,
+}
+
+impl ErrorResponse {
+ pub fn from_error(ty: ResponseFormat, error: impl ErrorExt) -> Self {
+ let code = error.status_code();
+
+ if code.should_log_error() {
+ error!(error; "Failed to handle HTTP request");
+ } else {
+ debug!("Failed to handle HTTP request, err: {:?}", error);
+ }
+
+ Self::from_error_message(ty, code, error.output_msg())
+ }
+
+ pub fn from_error_message(ty: ResponseFormat, code: StatusCode, msg: String) -> Self {
+ ErrorResponse {
+ ty,
+ code: code as u32,
+ error: msg,
+ execution_time_ms: 0,
+ }
+ }
+
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
+ }
+
+ pub fn execution_time_ms(&self) -> u64 {
+ self.execution_time_ms
+ }
+
+ pub fn code(&self) -> u32 {
+ self.code
+ }
+
+ pub fn error(&self) -> &str {
+ &self.error
+ }
+}
+
+impl IntoResponse for ErrorResponse {
+ fn into_response(self) -> Response {
+ let ty = self.ty.as_str();
+ let code = self.code;
+ let msg = self.error.clone();
+ let execution_time = self.execution_time_ms;
+ let mut resp = Json(self).into_response();
+ resp.headers_mut()
+ .insert(GREPTIME_DB_HEADER_ERROR_CODE, HeaderValue::from(code));
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_ERROR_MSG,
+ HeaderValue::from_str(&msg).expect("malformed error msg"),
+ );
+ resp.headers_mut()
+ .insert(GREPTIME_DB_HEADER_FORMAT, HeaderValue::from_static(ty));
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_EXECUTION_TIME,
+ HeaderValue::from(execution_time),
+ );
+ resp
+ }
+}
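The same attach-headers-to-a-JSON-body pattern recurs in the other response types in this change; a minimal sketch of it in isolation (illustrative only, assuming axum and serde_json; the header name is the `x-greptime-execution-time` constant defined later in this diff):

use axum::http::HeaderValue;
use axum::response::{IntoResponse, Json, Response};

// Serialize a body to JSON, then attach extra metadata headers, as
// `ErrorResponse::into_response` does above.
fn json_with_execution_time(code: u32, execution_time: u64) -> Response {
    let mut resp = Json(serde_json::json!({ "code": code })).into_response();
    resp.headers_mut().insert(
        "x-greptime-execution-time",
        HeaderValue::from(execution_time),
    );
    resp
}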
diff --git a/src/servers/src/http/greptime_result_v1.rs b/src/servers/src/http/greptime_result_v1.rs
new file mode 100644
index 000000000000..53e16948b7b5
--- /dev/null
+++ b/src/servers/src/http/greptime_result_v1.rs
@@ -0,0 +1,71 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use axum::response::{IntoResponse, Response};
+use axum::Json;
+use common_query::Output;
+use reqwest::header::HeaderValue;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat};
+
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct GreptimedbV1Response {
+ #[serde(skip_serializing_if = "Vec::is_empty", default)]
+ pub(crate) output: Vec<GreptimeQueryOutput>,
+ pub(crate) execution_time_ms: u64,
+}
+
+impl GreptimedbV1Response {
+ pub async fn from_output(outputs: Vec<crate::error::Result<Output>>) -> HttpResponse {
+ match handler::from_output(ResponseFormat::GreptimedbV1, outputs).await {
+ Ok(output) => HttpResponse::GreptimedbV1(Self {
+ output,
+ execution_time_ms: 0,
+ }),
+ Err(err) => HttpResponse::Error(err),
+ }
+ }
+
+ pub fn output(&self) -> &[GreptimeQueryOutput] {
+ &self.output
+ }
+
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
+ }
+
+ pub fn execution_time_ms(&self) -> u64 {
+ self.execution_time_ms
+ }
+}
+
+impl IntoResponse for GreptimedbV1Response {
+ fn into_response(self) -> Response {
+ let execution_time = self.execution_time_ms;
+ let mut resp = Json(self).into_response();
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_FORMAT,
+ HeaderValue::from_static("greptimedb_v1"),
+ );
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_EXECUTION_TIME,
+ HeaderValue::from(execution_time),
+ );
+ resp
+ }
+}
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index f615e520cb98..3bcea4595d7b 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -22,12 +22,21 @@ use axum::response::{IntoResponse, Response};
use axum::{Extension, Form};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
+use common_query::Output;
+use common_recordbatch::util;
use query::parser::PromQuery;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use session::context::QueryContextRef;
-use crate::http::{ApiState, Epoch, GreptimeOptionsConfigState, JsonResponse, ResponseFormat};
+use crate::http::csv_result::CsvResponse;
+use crate::http::error_result::ErrorResponse;
+use crate::http::greptime_result_v1::GreptimedbV1Response;
+use crate::http::influxdb_result_v1::InfluxdbV1Response;
+use crate::http::{
+ ApiState, Epoch, GreptimeOptionsConfigState, GreptimeQueryOutput, HttpRecordsOutput,
+ HttpResponse, ResponseFormat,
+};
use crate::metrics_handler::MetricsHandler;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
@@ -35,7 +44,7 @@ use crate::query_handler::sql::ServerSqlQueryHandlerRef;
pub struct SqlQuery {
pub db: Option<String>,
pub sql: Option<String>,
- // (Optional) result format: [`gerptimedb_v1`, `influxdb_v1`],
+ // (Optional) result format: [`greptimedb_v1`, `influxdb_v1`, `csv`],
// the default value is `greptimedb_v1`
pub format: Option<String>,
// Returns epoch timestamps with the specified precision.
@@ -56,7 +65,7 @@ pub async fn sql(
Query(query_params): Query<SqlQuery>,
Extension(query_ctx): Extension<QueryContextRef>,
Form(form_params): Form<SqlQuery>,
-) -> Json<JsonResponse> {
+) -> HttpResponse {
let sql_handler = &state.sql_handler;
let start = Instant::now();
@@ -78,21 +87,82 @@ pub async fn sql(
.with_label_values(&[db.as_str()])
.start_timer();
- let resp = if let Some(sql) = &sql {
- if let Some(resp) = validate_schema(sql_handler.clone(), query_ctx.clone(), format).await {
- return Json(resp);
+ let result = if let Some(sql) = &sql {
+ if let Some((status, msg)) = validate_schema(sql_handler.clone(), query_ctx.clone()).await {
+ Err((status, msg))
+ } else {
+ Ok(sql_handler.do_query(sql, query_ctx).await)
}
-
- JsonResponse::from_output(sql_handler.do_query(sql, query_ctx).await, format, epoch).await
} else {
- JsonResponse::with_error_message(
- "sql parameter is required.".to_string(),
+ Err((
StatusCode::InvalidArguments,
- format,
- )
+ "sql parameter is required.".to_string(),
+ ))
+ };
+
+ let outputs = match result {
+ Err((status, msg)) => {
+ return HttpResponse::Error(
+ ErrorResponse::from_error_message(format, status, msg)
+ .with_execution_time(start.elapsed().as_millis() as u64),
+ );
+ }
+ Ok(outputs) => outputs,
};
- Json(resp.with_execution_time(start.elapsed().as_millis()))
+ let resp = match format {
+ ResponseFormat::Csv => CsvResponse::from_output(outputs).await,
+ ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
+ ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, epoch).await,
+ };
+
+ resp.with_execution_time(start.elapsed().as_millis() as u64)
+}
+
+/// Create a response from query result
+pub async fn from_output(
+ ty: ResponseFormat,
+ outputs: Vec<crate::error::Result<Output>>,
+) -> Result<Vec<GreptimeQueryOutput>, ErrorResponse> {
+ // TODO(sunng87): this api response structure cannot represent error well.
+ // It hides successful execution results from error response
+ let mut results = Vec::with_capacity(outputs.len());
+ for out in outputs {
+ match out {
+ Ok(Output::AffectedRows(rows)) => {
+ results.push(GreptimeQueryOutput::AffectedRows(rows));
+ }
+ Ok(Output::Stream(stream)) => {
+ // TODO(sunng87): streaming response
+ match util::collect(stream).await {
+ Ok(rows) => match HttpRecordsOutput::try_from(rows) {
+ Ok(rows) => {
+ results.push(GreptimeQueryOutput::Records(rows));
+ }
+ Err(err) => {
+ return Err(ErrorResponse::from_error(ty, err));
+ }
+ },
+ Err(err) => {
+ return Err(ErrorResponse::from_error(ty, err));
+ }
+ }
+ }
+ Ok(Output::RecordBatches(rbs)) => match HttpRecordsOutput::try_from(rbs.take()) {
+ Ok(rows) => {
+ results.push(GreptimeQueryOutput::Records(rows));
+ }
+ Err(err) => {
+ return Err(ErrorResponse::from_error(ty, err));
+ }
+ },
+ Err(err) => {
+ return Err(ErrorResponse::from_error(ty, err));
+ }
+ }
+ }
+
+ Ok(results)
}
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
@@ -121,7 +191,7 @@ pub async fn promql(
State(state): State<ApiState>,
Query(params): Query<PromqlQuery>,
Extension(query_ctx): Extension<QueryContextRef>,
-) -> Json<JsonResponse> {
+) -> Response {
let sql_handler = &state.sql_handler;
let exec_start = Instant::now();
let db = query_ctx.get_db_string();
@@ -129,29 +199,23 @@ pub async fn promql(
.with_label_values(&[db.as_str()])
.start_timer();
- if let Some(resp) = validate_schema(
- sql_handler.clone(),
- query_ctx.clone(),
- ResponseFormat::GreptimedbV1,
- )
- .await
+ let resp = if let Some((status, msg)) =
+ validate_schema(sql_handler.clone(), query_ctx.clone()).await
{
- return Json(resp);
- }
-
- let prom_query = params.into();
- let resp = JsonResponse::from_output(
- sql_handler.do_promql_query(&prom_query, query_ctx).await,
- ResponseFormat::GreptimedbV1,
- None,
- )
- .await;
+ let resp = ErrorResponse::from_error_message(ResponseFormat::GreptimedbV1, status, msg);
+ HttpResponse::Error(resp)
+ } else {
+ let prom_query = params.into();
+ let outputs = sql_handler.do_promql_query(&prom_query, query_ctx).await;
+ GreptimedbV1Response::from_output(outputs).await
+ };
- Json(resp.with_execution_time(exec_start.elapsed().as_millis()))
+ resp.with_execution_time(exec_start.elapsed().as_millis() as u64)
+ .into_response()
}
pub(crate) fn sql_docs(op: TransformOperation) -> TransformOperation {
- op.response::<200, Json<JsonResponse>>()
+ op.response::<200, Json<HttpResponse>>()
}
/// Handler to export metrics
@@ -222,26 +286,23 @@ pub async fn config(State(state): State<GreptimeOptionsConfigState>) -> Response
async fn validate_schema(
sql_handler: ServerSqlQueryHandlerRef,
query_ctx: QueryContextRef,
- format: ResponseFormat,
-) -> Option<JsonResponse> {
+) -> Option<(StatusCode, String)> {
match sql_handler
.is_valid_schema(query_ctx.current_catalog(), query_ctx.current_schema())
.await
{
- Ok(false) => Some(JsonResponse::with_error_message(
- format!("Database not found: {}", query_ctx.get_db_string()),
+ Ok(true) => None,
+ Ok(false) => Some((
StatusCode::DatabaseNotFound,
- format,
+ format!("Database not found: {}", query_ctx.get_db_string()),
)),
- Err(e) => Some(JsonResponse::with_error_message(
+ Err(e) => Some((
+ StatusCode::Internal,
format!(
"Error checking database: {}, {}",
query_ctx.get_db_string(),
e.output_msg(),
),
- StatusCode::Internal,
- format,
)),
- _ => None,
}
}
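For completeness, a hedged client-side sketch of calling the updated `/v1/sql` endpoint with the new `format=csv` option (not part of the commit; it assumes the reqwest crate, a locally running server at `base_url`, and the `numbers` table used in the tests):

// Request CSV output from the /v1/sql endpoint; `format=csv` is the
// option added in this change.
async fn query_csv(base_url: &str) -> reqwest::Result<String> {
    reqwest::Client::new()
        .get(format!("{base_url}/v1/sql"))
        .query(&[
            ("format", "csv"),
            ("sql", "select * from numbers limit 5"),
        ])
        .send()
        .await?
        .text()
        .await
}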
diff --git a/src/servers/src/http/header.rs b/src/servers/src/http/header.rs
index 04db66955030..40bcfabf4bae 100644
--- a/src/servers/src/http/header.rs
+++ b/src/servers/src/http/header.rs
@@ -14,13 +14,16 @@
use headers::{Header, HeaderName, HeaderValue};
-pub static GREPTIME_DB_NAME_HEADER_NAME: HeaderName = HeaderName::from_static("x-greptime-db-name");
+pub const GREPTIME_DB_HEADER_FORMAT: &str = "x-greptime-format";
+pub const GREPTIME_DB_HEADER_EXECUTION_TIME: &str = "x-greptime-execution-time";
+
+pub static GREPTIME_DB_HEADER_NAME: HeaderName = HeaderName::from_static("x-greptime-name");
pub struct GreptimeDbName(Option<String>);
impl Header for GreptimeDbName {
fn name() -> &'static HeaderName {
- &GREPTIME_DB_NAME_HEADER_NAME
+ &GREPTIME_DB_HEADER_NAME
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
diff --git a/src/servers/src/http/influxdb_result_v1.rs b/src/servers/src/http/influxdb_result_v1.rs
index 0b8c184ce738..cfcc8f1f94f8 100644
--- a/src/servers/src/http/influxdb_result_v1.rs
+++ b/src/servers/src/http/influxdb_result_v1.rs
@@ -12,17 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use axum::http::HeaderValue;
+use axum::response::{IntoResponse, Response};
+use axum::Json;
use common_error::ext::ErrorExt;
use common_query::Output;
use common_recordbatch::{util, RecordBatch};
-use common_telemetry::{debug, error};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
use crate::error::{Error, ToJsonSnafu};
-use crate::http::Epoch;
+use crate::http::error_result::ErrorResponse;
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+use crate::http::{Epoch, HttpResponse, ResponseFormat};
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct SqlQuery {
@@ -125,55 +129,26 @@ impl InfluxdbOutput {
#[derive(Serialize, Deserialize, Debug, JsonSchema)]
pub struct InfluxdbV1Response {
results: Vec<InfluxdbOutput>,
- #[serde(skip_serializing_if = "Option::is_none")]
- error: Option<String>,
- #[serde(skip_serializing_if = "Option::is_none")]
- execution_time_ms: Option<u64>,
+ execution_time_ms: u64,
}
impl InfluxdbV1Response {
- pub fn with_error(error: impl ErrorExt) -> Self {
- let code = error.status_code();
- if code.should_log_error() {
- error!(error; "Failed to handle HTTP request");
- } else {
- debug!("Failed to handle HTTP request, err: {:?}", error);
- }
-
- InfluxdbV1Response {
- results: vec![],
- error: Some(error.output_msg()),
- execution_time_ms: None,
- }
- }
-
- pub fn with_error_message(err_msg: String) -> Self {
- InfluxdbV1Response {
- results: vec![],
- error: Some(err_msg),
- execution_time_ms: None,
- }
- }
-
- fn with_output(results: Vec<InfluxdbOutput>) -> Self {
- InfluxdbV1Response {
- results,
- error: None,
- execution_time_ms: None,
- }
- }
-
- pub fn with_execution_time(&mut self, execution_time: u64) {
- self.execution_time_ms = Some(execution_time);
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
}
/// Create a influxdb v1 response from query result
pub async fn from_output(
outputs: Vec<crate::error::Result<Output>>,
epoch: Option<Epoch>,
- ) -> Self {
- // TODO(sunng87): this api response structure cannot represent error
- // well. It hides successful execution results from error response
+ ) -> HttpResponse {
+ fn make_error_response(error: impl ErrorExt) -> HttpResponse {
+ HttpResponse::Error(ErrorResponse::from_error(ResponseFormat::InfluxdbV1, error))
+ }
+
+ // TODO(sunng87): this api response structure cannot represent error well.
+ // It hides successful execution results from error response
let mut results = Vec::with_capacity(outputs.len());
for (statement_id, out) in outputs.into_iter().enumerate() {
let statement_id = statement_id as u32;
@@ -195,12 +170,11 @@ impl InfluxdbV1Response {
});
}
Err(err) => {
- return Self::with_error(err);
+ return make_error_response(err);
}
},
-
- Err(e) => {
- return Self::with_error(e);
+ Err(err) => {
+ return make_error_response(err);
}
}
}
@@ -213,31 +187,43 @@ impl InfluxdbV1Response {
});
}
Err(err) => {
- return Self::with_error(err);
+ return make_error_response(err);
}
}
}
- Err(e) => {
- return Self::with_error(e);
+ Err(err) => {
+ return make_error_response(err);
}
}
}
- Self::with_output(results)
- }
- pub fn success(&self) -> bool {
- self.error.is_none()
- }
-
- pub fn error(&self) -> Option<&String> {
- self.error.as_ref()
+ HttpResponse::InfluxdbV1(InfluxdbV1Response {
+ results,
+ execution_time_ms: 0,
+ })
}
pub fn results(&self) -> &[InfluxdbOutput] {
&self.results
}
- pub fn execution_time_ms(&self) -> Option<u64> {
+ pub fn execution_time_ms(&self) -> u64 {
self.execution_time_ms
}
}
+
+impl IntoResponse for InfluxdbV1Response {
+ fn into_response(self) -> Response {
+ let execution_time = self.execution_time_ms;
+ let mut resp = Json(self).into_response();
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_FORMAT,
+ HeaderValue::from_static("influxdb_v1"),
+ );
+ resp.headers_mut().insert(
+ GREPTIME_DB_HEADER_EXECUTION_TIME,
+ HeaderValue::from(execution_time),
+ );
+ resp
+ }
+}
diff --git a/src/servers/src/http/script.rs b/src/servers/src/http/script.rs
index 741cc13171fc..445bc380cf61 100644
--- a/src/servers/src/http/script.rs
+++ b/src/servers/src/http/script.rs
@@ -15,7 +15,7 @@
use std::collections::HashMap;
use std::time::Instant;
-use axum::extract::{Json, Query, RawBody, State};
+use axum::extract::{Query, RawBody, State};
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
@@ -25,18 +25,19 @@ use session::context::QueryContext;
use snafu::ResultExt;
use crate::error::{HyperSnafu, InvalidUtf8ValueSnafu};
-use crate::http::{ApiState, GreptimedbV1Response, JsonResponse, ResponseFormat};
+use crate::http::error_result::ErrorResponse;
+use crate::http::{ApiState, GreptimedbV1Response, HttpResponse, ResponseFormat};
macro_rules! json_err {
($e: expr) => {{
- return Json(JsonResponse::with_error($e, ResponseFormat::GreptimedbV1));
+ return HttpResponse::Error(ErrorResponse::from_error(ResponseFormat::GreptimedbV1, $e));
}};
($msg: expr, $code: expr) => {{
- return Json(JsonResponse::with_error_message(
- $msg.to_string(),
- $code,
+ return HttpResponse::Error(ErrorResponse::from_error_message(
ResponseFormat::GreptimedbV1,
+ $code,
+ $msg.to_string(),
));
}};
}
@@ -56,7 +57,7 @@ pub async fn scripts(
State(state): State<ApiState>,
Query(params): Query<ScriptQuery>,
RawBody(body): RawBody,
-) -> Json<JsonResponse> {
+) -> HttpResponse {
if let Some(script_handler) = &state.script_handler {
let catalog = params
.catalog
@@ -80,18 +81,16 @@ pub async fn scripts(
// Safety: schema and name are already checked above.
let query_ctx = QueryContext::with(&catalog, schema.unwrap());
- let body = match script_handler
+ match script_handler
.insert_script(query_ctx, name.unwrap(), &script)
.await
{
- Ok(()) => GreptimedbV1Response::with_output(vec![]).into(),
+ Ok(()) => GreptimedbV1Response::from_output(vec![]).await,
Err(e) => json_err!(
format!("Insert script error: {}", e.output_msg()),
e.status_code()
),
- };
-
- Json(body)
+ }
} else {
json_err!(
"Script execution not supported, missing script handler",
@@ -114,7 +113,7 @@ pub struct ScriptQuery {
pub async fn run_script(
State(state): State<ApiState>,
Query(params): Query<ScriptQuery>,
-) -> Json<JsonResponse> {
+) -> HttpResponse {
if let Some(script_handler) = &state.script_handler {
let catalog = params
.catalog
@@ -137,10 +136,8 @@ pub async fn run_script(
let output = script_handler
.execute_script(query_ctx, name.unwrap(), params.params)
.await;
- let resp =
- JsonResponse::from_output(vec![output], ResponseFormat::GreptimedbV1, None).await;
-
- Json(resp.with_execution_time(start.elapsed().as_millis()))
+ let resp = GreptimedbV1Response::from_output(vec![output]).await;
+ resp.with_execution_time(start.elapsed().as_millis() as u64)
} else {
json_err!(
"Script execution not supported, missing script handler",
diff --git a/src/servers/tests/http/authorize.rs b/src/servers/tests/http/authorize.rs
index 97f1c9e2e821..675c291de31a 100644
--- a/src/servers/tests/http/authorize.rs
+++ b/src/servers/tests/http/authorize.rs
@@ -49,7 +49,7 @@ async fn test_http_auth() {
let mut resp = auth_res.unwrap_err();
assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
assert_eq!(
- b"{\"type\":\"GreptimedbV1\",\"code\":7003,\"error\":\"Not found http or grpc authorization header\"}",
+ b"{\"code\":7003,\"error\":\"Not found http or grpc authorization header\",\"execution_time_ms\":0}",
resp.data().await.unwrap().unwrap().as_ref()
);
@@ -60,7 +60,7 @@ async fn test_http_auth() {
let mut resp = auth_res.unwrap_err();
assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
assert_eq!(
- b"{\"type\":\"GreptimedbV1\",\"code\":7000,\"error\":\"User not found, username: username\"}",
+ b"{\"code\":7000,\"error\":\"User not found, username: username\",\"execution_time_ms\":0}",
resp.data().await.unwrap().unwrap().as_ref(),
);
}
@@ -95,7 +95,7 @@ async fn test_schema_validating() {
let mut resp = result.unwrap_err();
assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
assert_eq!(
- b"{\"type\":\"GreptimedbV1\",\"code\":7005,\"error\":\"Access denied for user 'greptime' to database 'greptime-wrong'\"}",
+ b"{\"code\":7005,\"error\":\"Access denied for user 'greptime' to database 'greptime-wrong'\",\"execution_time_ms\":0}",
resp.data().await.unwrap().unwrap().as_ref()
);
}
@@ -113,7 +113,7 @@ async fn test_whitelist_no_auth() {
let mut resp = auth_res.unwrap_err();
assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
assert_eq!(
- b"{\"type\":\"GreptimedbV1\",\"code\":7003,\"error\":\"Not found http or grpc authorization header\"}",
+ b"{\"code\":7003,\"error\":\"Not found http or grpc authorization header\",\"execution_time_ms\":0}",
resp.data().await.unwrap().unwrap().as_ref()
);
@@ -134,6 +134,5 @@ fn mock_http_request(
if let Some(auth_header) = auth_header {
req = req.header(http::header::AUTHORIZATION, auth_header);
}
-
Ok(req.body(()).unwrap())
}
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index e29953ed7553..afcd44c26296 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -16,12 +16,16 @@ use std::collections::HashMap;
use axum::body::{Body, Bytes};
use axum::extract::{Json, Query, RawBody, State};
+use axum::http::header;
+use axum::response::IntoResponse;
use axum::Form;
+use headers::HeaderValue;
use http_body::combinators::UnsyncBoxBody;
use hyper::Response;
+use mime_guess::mime;
use servers::http::{
handler as http_handler, script as script_handler, ApiState, GreptimeOptionsConfigState,
- JsonOutput, JsonResponse,
+ GreptimeQueryOutput, HttpResponse,
};
use servers::metrics_handler::MetricsHandler;
use session::context::QueryContext;
@@ -42,39 +46,26 @@ async fn test_sql_not_provided() {
script_handler: None,
};
- for format in ["greptimedb_v1", "influxdb_v1"] {
+ for format in ["greptimedb_v1", "influxdb_v1", "csv"] {
let query = http_handler::SqlQuery {
db: None,
sql: None,
format: Some(format.to_string()),
epoch: None,
};
- let Json(json) = http_handler::sql(
+
+ let HttpResponse::Error(resp) = http_handler::sql(
State(api_state.clone()),
Query(query),
axum::Extension(ctx.clone()),
Form(http_handler::SqlQuery::default()),
)
- .await;
+ .await
+ else {
+ unreachable!("must be error response")
+ };
- match json {
- JsonResponse::GreptimedbV1(resp) => {
- assert!(!resp.success());
- assert_eq!(
- Some(&"sql parameter is required.".to_string()),
- resp.error()
- );
- assert!(resp.output().is_empty());
- }
- JsonResponse::InfluxdbV1(resp) => {
- assert!(!resp.success());
- assert_eq!(
- Some(&"sql parameter is required.".to_string()),
- resp.error()
- );
- assert!(resp.results().is_empty());
- }
- }
+ assert_eq!("sql parameter is required.", resp.error());
}
}
@@ -91,9 +82,9 @@ async fn test_sql_output_rows() {
script_handler: None,
};
- for format in ["greptimedb_v1", "influxdb_v1"] {
+ for format in ["greptimedb_v1", "influxdb_v1", "csv"] {
let query = create_query(format);
- let Json(json) = http_handler::sql(
+ let json = http_handler::sql(
State(api_state.clone()),
query,
axum::Extension(ctx.clone()),
@@ -102,16 +93,13 @@ async fn test_sql_output_rows() {
.await;
match json {
- JsonResponse::GreptimedbV1(resp) => {
- assert!(resp.success(), "{resp:?}");
- assert!(resp.error().is_none());
- match &resp.output()[0] {
- JsonOutput::Records(records) => {
- assert_eq!(1, records.num_rows());
- let json = serde_json::to_string_pretty(&records).unwrap();
- assert_eq!(
- json,
- r#"{
+ HttpResponse::GreptimedbV1(resp) => match &resp.output()[0] {
+ GreptimeQueryOutput::Records(records) => {
+ assert_eq!(1, records.num_rows());
+ let json = serde_json::to_string_pretty(&records).unwrap();
+ assert_eq!(
+ json,
+ r#"{
"schema": {
"column_schemas": [
{
@@ -126,15 +114,11 @@ async fn test_sql_output_rows() {
]
]
}"#
- );
- }
- _ => unreachable!(),
+ );
}
- }
- JsonResponse::InfluxdbV1(resp) => {
- assert!(resp.success(), "{resp:?}");
- assert!(resp.error().is_none());
-
+ _ => unreachable!(),
+ },
+ HttpResponse::InfluxdbV1(resp) => {
let json = serde_json::to_string_pretty(&resp.results()).unwrap();
assert_eq!(
json,
@@ -158,6 +142,19 @@ async fn test_sql_output_rows() {
]"#
);
}
+ HttpResponse::Csv(resp) => {
+ use http_body::Body as HttpBody;
+ let mut resp = resp.into_response();
+ assert_eq!(
+ resp.headers().get(header::CONTENT_TYPE),
+ Some(HeaderValue::from_static(mime::TEXT_CSV_UTF_8.as_ref())).as_ref(),
+ );
+ assert_eq!(
+ resp.body_mut().data().await.unwrap().unwrap(),
+ hyper::body::Bytes::from_static(b"4950\n"),
+ );
+ }
+ _ => unreachable!(),
}
}
}
@@ -175,9 +172,9 @@ async fn test_sql_form() {
script_handler: None,
};
- for format in ["greptimedb_v1", "influxdb_v1"] {
+ for format in ["greptimedb_v1", "influxdb_v1", "csv"] {
let form = create_form(format);
- let Json(json) = http_handler::sql(
+ let json = http_handler::sql(
State(api_state.clone()),
Query(http_handler::SqlQuery::default()),
axum::Extension(ctx.clone()),
@@ -186,16 +183,13 @@ async fn test_sql_form() {
.await;
match json {
- JsonResponse::GreptimedbV1(resp) => {
- assert!(resp.success(), "{resp:?}");
- assert!(resp.error().is_none());
- match &resp.output()[0] {
- JsonOutput::Records(records) => {
- assert_eq!(1, records.num_rows());
- let json = serde_json::to_string_pretty(&records).unwrap();
- assert_eq!(
- json,
- r#"{
+ HttpResponse::GreptimedbV1(resp) => match &resp.output()[0] {
+ GreptimeQueryOutput::Records(records) => {
+ assert_eq!(1, records.num_rows());
+ let json = serde_json::to_string_pretty(&records).unwrap();
+ assert_eq!(
+ json,
+ r#"{
"schema": {
"column_schemas": [
{
@@ -210,15 +204,11 @@ async fn test_sql_form() {
]
]
}"#
- );
- }
- _ => unreachable!(),
+ );
}
- }
- JsonResponse::InfluxdbV1(resp) => {
- assert!(resp.success(), "{resp:?}");
- assert!(resp.error().is_none());
-
+ _ => unreachable!(),
+ },
+ HttpResponse::InfluxdbV1(resp) => {
let json = serde_json::to_string_pretty(&resp.results()).unwrap();
assert_eq!(
json,
@@ -242,6 +232,19 @@ async fn test_sql_form() {
]"#
);
}
+ HttpResponse::Csv(resp) => {
+ use http_body::Body as HttpBody;
+ let mut resp = resp.into_response();
+ assert_eq!(
+ resp.headers().get(header::CONTENT_TYPE),
+ Some(HeaderValue::from_static(mime::TEXT_CSV_UTF_8.as_ref())).as_ref(),
+ );
+ assert_eq!(
+ resp.body_mut().data().await.unwrap().unwrap(),
+ hyper::body::Bytes::from_static(b"4950\n"),
+ );
+ }
+ _ => unreachable!(),
}
}
}
@@ -255,7 +258,7 @@ lazy_static::lazy_static! {
async fn test_metrics() {
TEST_METRIC.inc();
let stats = MetricsHandler;
- let text = http_handler::metrics(axum::extract::State(stats), Query(HashMap::default())).await;
+ let text = http_handler::metrics(State(stats), Query(HashMap::default())).await;
assert!(text.contains("test_metrics counter"));
}
@@ -266,7 +269,7 @@ async fn insert_script(
) {
let body = RawBody(Body::from(script.clone()));
let invalid_query = create_invalid_script_query();
- let Json(json) = script_handler::scripts(
+ let json = script_handler::scripts(
State(ApiState {
sql_handler: sql_handler.clone(),
script_handler: Some(script_handler.clone()),
@@ -275,16 +278,15 @@ async fn insert_script(
body,
)
.await;
- let JsonResponse::GreptimedbV1(json) = json else {
+ let HttpResponse::Error(json) = json else {
unreachable!()
};
- assert!(!json.success(), "{json:?}");
- assert_eq!(json.error().unwrap(), "invalid schema");
+ assert_eq!(json.error(), "invalid schema");
let body = RawBody(Body::from(script.clone()));
let exec = create_script_query();
// Insert the script
- let Json(json) = script_handler::scripts(
+ let json = script_handler::scripts(
State(ApiState {
sql_handler: sql_handler.clone(),
script_handler: Some(script_handler.clone()),
@@ -293,11 +295,9 @@ async fn insert_script(
body,
)
.await;
- let JsonResponse::GreptimedbV1(json) = json else {
+ let HttpResponse::GreptimedbV1(json) = json else {
unreachable!()
};
- assert!(json.success(), "{json:?}");
- assert!(json.error().is_none());
assert!(json.output().is_empty());
}
@@ -317,7 +317,7 @@ def test(n) -> vector[i64]:
insert_script(script.clone(), script_handler.clone(), sql_handler.clone()).await;
// Run the script
let exec = create_script_query();
- let Json(json) = script_handler::run_script(
+ let json = script_handler::run_script(
State(ApiState {
sql_handler,
script_handler: Some(script_handler),
@@ -325,14 +325,11 @@ def test(n) -> vector[i64]:
exec,
)
.await;
- let JsonResponse::GreptimedbV1(json) = json else {
+ let HttpResponse::GreptimedbV1(json) = json else {
unreachable!()
};
- assert!(json.success(), "{json:?}");
- assert!(json.error().is_none());
-
match &json.output()[0] {
- JsonOutput::Records(records) => {
+ GreptimeQueryOutput::Records(records) => {
let json = serde_json::to_string_pretty(&records).unwrap();
assert_eq!(5, records.num_rows());
assert_eq!(
@@ -387,7 +384,7 @@ def test(n, **params) -> vector[i64]:
// Run the script
let mut exec = create_script_query();
let _ = exec.0.params.insert("a".to_string(), "42".to_string());
- let Json(json) = script_handler::run_script(
+ let json = script_handler::run_script(
State(ApiState {
sql_handler,
script_handler: Some(script_handler),
@@ -395,14 +392,11 @@ def test(n, **params) -> vector[i64]:
exec,
)
.await;
- let JsonResponse::GreptimedbV1(json) = json else {
+ let HttpResponse::GreptimedbV1(json) = json else {
unreachable!()
};
- assert!(json.success(), "{json:?}");
- assert!(json.error().is_none());
-
match &json.output()[0] {
- JsonOutput::Records(records) => {
+ GreptimeQueryOutput::Records(records) => {
let json = serde_json::to_string_pretty(&records).unwrap();
assert_eq!(5, records.num_rows());
assert_eq!(
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index 06a9193ce350..a1a0015fa65a 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -26,6 +26,7 @@ use query::parser::PromQuery;
use query::plan::LogicalPlan;
use query::query_engine::DescribeResult;
use servers::error::{Error, Result};
+use servers::http::header::GREPTIME_DB_HEADER_FORMAT;
use servers::http::{HttpOptions, HttpServerBuilder};
use servers::influxdb::InfluxdbRequest;
use servers::query_handler::grpc::GrpcQueryHandler;
@@ -169,7 +170,11 @@ async fn test_influxdb_write() {
.await;
assert_eq!(result.status(), 401);
assert_eq!(
- "{\"type\":\"InfluxdbV1\",\"results\":[],\"error\":\"Username and password does not match, username: greptime\"}",
+ result.headers().get(GREPTIME_DB_HEADER_FORMAT).unwrap(),
+ "influxdb_v1",
+ );
+ assert_eq!(
+ "{\"code\":7002,\"error\":\"Username and password does not match, username: greptime\",\"execution_time_ms\":0}",
result.text().await
);
@@ -181,7 +186,11 @@ async fn test_influxdb_write() {
.await;
assert_eq!(result.status(), 401);
assert_eq!(
- "{\"type\":\"InfluxdbV1\",\"results\":[],\"error\":\"Not found influx http authorization info\"}",
+ result.headers().get(GREPTIME_DB_HEADER_FORMAT).unwrap(),
+ "influxdb_v1",
+ );
+ assert_eq!(
+ "{\"code\":7003,\"error\":\"Not found influx http authorization info\",\"execution_time_ms\":0}",
result.text().await
);
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 42843ac22e81..8495c0b0f00b 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -19,10 +19,12 @@ use axum::http::StatusCode;
use axum_test_helper::TestClient;
use common_error::status_code::StatusCode as ErrorCode;
use serde_json::json;
+use servers::http::error_result::ErrorResponse;
+use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::handler::HealthResponse;
-use servers::http::influxdb_result_v1::InfluxdbOutput;
+use servers::http::influxdb_result_v1::{InfluxdbOutput, InfluxdbV1Response};
use servers::http::prometheus::{PrometheusJsonResponse, PrometheusResponse};
-use servers::http::{JsonOutput, JsonResponse};
+use servers::http::GreptimeQueryOutput;
use tests_integration::test_util::{
setup_test_http_app, setup_test_http_app_with_frontend,
setup_test_http_app_with_frontend_and_user_provider, setup_test_prom_app_with_frontend,
@@ -123,13 +125,9 @@ pub async fn test_sql_api(store_type: StorageType) {
let res = client.get("/v1/sql").send().await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
+ let body = serde_json::from_str::<ErrorResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), 1004);
- assert_eq!(body.error().unwrap(), "sql parameter is required.");
- let _ = body.execution_time_ms().unwrap();
+ assert_eq!(body.error(), "sql parameter is required.");
let res = client
.get("/v1/sql?sql=select * from numbers limit 10")
@@ -137,18 +135,12 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
-
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let output = body.output();
assert_eq!(output.len(), 1);
assert_eq!(
output[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records" :{"schema":{"column_schemas":[{"name":"number","data_type":"UInt32"}]},"rows":[[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]]}
})).unwrap()
);
@@ -160,13 +152,7 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::InfluxdbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
-
+ let body = serde_json::from_str::<InfluxdbV1Response>(&res.text().await).unwrap();
let output = body.results();
assert_eq!(output.len(), 1);
assert_eq!(
@@ -190,18 +176,13 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let output = body.output();
assert_eq!(output.len(), 1);
assert_eq!(
output[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"host","data_type":"String"},{"name":"cpu","data_type":"Float64"},{"name":"memory","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[["host",66.6,1024.0,0]]}
})).unwrap()
);
@@ -213,18 +194,13 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let output = body.output();
assert_eq!(output.len(), 1);
assert_eq!(
output[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);
@@ -236,17 +212,12 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let output = body.output();
assert_eq!(output.len(), 1);
assert_eq!(
output[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"c","data_type":"Float64"},{"name":"time","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);
@@ -258,23 +229,18 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let outputs = body.output();
assert_eq!(outputs.len(), 2);
assert_eq!(
outputs[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);
assert_eq!(
outputs[1],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"rows":[]}
}))
.unwrap()
@@ -287,14 +253,9 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(!body.success());
- let _ = body.execution_time_ms().unwrap();
+ let _body = serde_json::from_str::<ErrorResponse>(&res.text().await).unwrap();
// TODO(shuiyisong): fix this when return source err msg to client side
- // assert!(body.error().unwrap().contains("Table not found"));
+ // assert!(body.error().contains("Table not found"));
// test database given
let res = client
@@ -303,17 +264,12 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let outputs = body.output();
assert_eq!(outputs.len(), 1);
assert_eq!(
outputs[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);
@@ -324,10 +280,7 @@ pub async fn test_sql_api(store_type: StorageType) {
.send()
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
+ let body = serde_json::from_str::<ErrorResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), ErrorCode::DatabaseNotFound as u32);
// test catalog-schema given
@@ -337,17 +290,12 @@ pub async fn test_sql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let outputs = body.output();
assert_eq!(outputs.len(), 1);
assert_eq!(
outputs[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"cpu","data_type":"Float64"},{"name":"ts","data_type":"TimestampMillisecond"}]},"rows":[[66.6,0]]}
})).unwrap()
);
@@ -358,10 +306,7 @@ pub async fn test_sql_api(store_type: StorageType) {
.send()
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
+ let body = serde_json::from_str::<ErrorResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), ErrorCode::DatabaseNotFound as u32);
// test invalid schema
@@ -370,10 +315,7 @@ pub async fn test_sql_api(store_type: StorageType) {
.send()
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
+ let body = serde_json::from_str::<ErrorResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), ErrorCode::DatabaseNotFound as u32);
guard.remove_all().await;
@@ -389,13 +331,7 @@ pub async fn test_prometheus_promql_api(store_type: StorageType) {
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert!(body.success());
- let _ = body.execution_time_ms().unwrap();
-
+ let _body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
guard.remove_all().await;
}
@@ -605,11 +541,7 @@ def test(n) -> vector[f64]:
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
- assert_eq!(body.code(), 0);
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
assert!(body.output().is_empty());
// call script
@@ -618,18 +550,12 @@ def test(n) -> vector[f64]:
.send()
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body = serde_json::from_str::<JsonResponse>(&res.text().await).unwrap();
- let JsonResponse::GreptimedbV1(body) = body else {
- unreachable!()
- };
-
- assert_eq!(body.code(), 0);
- let _ = body.execution_time_ms().unwrap();
+ let body = serde_json::from_str::<GreptimedbV1Response>(&res.text().await).unwrap();
let output = body.output();
assert_eq!(output.len(), 1);
assert_eq!(
output[0],
- serde_json::from_value::<JsonOutput>(json!({
+ serde_json::from_value::<GreptimeQueryOutput>(json!({
"records":{"schema":{"column_schemas":[{"name":"n","data_type":"Float64"}]},"rows":[[1.0],[2.0],[3.0],[4.0],[5.0],[6.0],[7.0],[8.0],[9.0],[10.0]]}
})).unwrap()
);
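The hunks above drop the old JsonResponse enum destructuring and deserialize the HTTP body directly into a concrete response type per case. A minimal, self-contained sketch of that pattern follows; GreptimedbV1Response, ErrorResponse and GreptimeQueryOutput here are simplified stand-ins for illustration, not the actual definitions in the servers crate.

use serde::Deserialize;

// Simplified stand-ins for the response types referenced above; the real
// structs carry more fields (execution time, schema, etc.).
#[derive(Debug, Deserialize)]
struct GreptimeQueryOutput {
    records: serde_json::Value,
}

#[derive(Debug, Deserialize)]
struct GreptimedbV1Response {
    output: Vec<GreptimeQueryOutput>,
}

#[derive(Debug, Deserialize)]
struct ErrorResponse {
    code: u32,
    error: String,
}

fn main() {
    // Success path: deserialize straight into the v1 response and inspect its outputs.
    let ok = r#"{"output":[{"records":{"rows":[[66.6,0]]}}]}"#;
    let body: GreptimedbV1Response = serde_json::from_str(ok).unwrap();
    assert_eq!(body.output.len(), 1);
    println!("records: {}", body.output[0].records);

    // Error path: the endpoint returns a flat error payload instead of an enum wrapper.
    let err = r#"{"code":4001,"error":"Database not found"}"#;
    let body: ErrorResponse = serde_json::from_str(err).unwrap();
    assert_eq!(body.code, 4001);
    println!("error: {}", body.error);
}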
|
feat
|
support CSV format in sql HTTP API (#3062)
|
6c621b7fcf2633c94852fbcf72782bb69e3887aa
|
2024-05-15 18:25:49
|
tison
|
ci: implement docbot in cyborg (#3937)
| false
|
diff --git a/.github/actions/setup-cyborg/action.yml b/.github/actions/setup-cyborg/action.yml
new file mode 100644
index 000000000000..7b0bcd6ba13e
--- /dev/null
+++ b/.github/actions/setup-cyborg/action.yml
@@ -0,0 +1,16 @@
+name: Setup cyborg environment
+description: Setup cyborg environment
+runs:
+ using: composite
+ steps:
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+ - uses: pnpm/action-setup@v3
+ with:
+ package_json_file: 'cyborg/package.json'
+ run_install: true
+ - name: Describe the Environment
+ working-directory: cyborg
+ shell: bash
+ run: pnpm tsx -v
diff --git a/.github/doc-label-config.yml b/.github/doc-label-config.yml
deleted file mode 100644
index 60f20533a1d2..000000000000
--- a/.github/doc-label-config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-Doc not needed:
- - '- \[x\] This PR does not require documentation updates.'
-Doc update required:
- - '- \[ \] This PR does not require documentation updates.'
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 391792c142de..c50137f87681 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist
-- [ ] I have written the necessary rustdoc comments.
-- [ ] I have added the necessary unit tests and integration tests.
-- [x] This PR does not require documentation updates.
+- [ ] I have written the necessary rustdoc comments.
+- [ ] I have added the necessary unit tests and integration tests.
+- [ ] This PR requires documentation updates.
diff --git a/.github/workflows/doc-issue.yml b/.github/workflows/doc-issue.yml
deleted file mode 100644
index 4b366e217fe6..000000000000
--- a/.github/workflows/doc-issue.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: Create Issue in downstream repos
-
-on:
- issues:
- types:
- - labeled
- pull_request_target:
- types:
- - labeled
-
-jobs:
- doc_issue:
- if: github.event.label.name == 'doc update required'
- runs-on: ubuntu-20.04
- steps:
- - name: create an issue in doc repo
- uses: dacbd/[email protected]
- with:
- owner: GreptimeTeam
- repo: docs
- token: ${{ secrets.DOCS_REPO_TOKEN }}
- title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
- body: |
- A document change request is generated from
- ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
- cloud_issue:
- if: github.event.label.name == 'cloud followup required'
- runs-on: ubuntu-20.04
- steps:
- - name: create an issue in cloud repo
- uses: dacbd/[email protected]
- with:
- owner: GreptimeTeam
- repo: greptimedb-cloud
- token: ${{ secrets.DOCS_REPO_TOKEN }}
- title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
- body: |
- A followup request is generated from
- ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
diff --git a/.github/workflows/doc-label.yml b/.github/workflows/doc-label.yml
deleted file mode 100644
index 9bea27af196c..000000000000
--- a/.github/workflows/doc-label.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: "PR Doc Labeler"
-on:
- pull_request_target:
- types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
-
-permissions:
- pull-requests: write
- contents: read
-
-jobs:
- triage:
- if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
- steps:
- - uses: github/[email protected]
- with:
- configuration-path: .github/doc-label-config.yml
- enable-versioned-regex: false
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- sync-labels: 1
- - name: create an issue in doc repo
- uses: dacbd/[email protected]
- if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
- with:
- owner: GreptimeTeam
- repo: docs
- token: ${{ secrets.DOCS_REPO_TOKEN }}
- title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
- body: |
- A document change request is generated from
- ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
- - name: Check doc labels
- uses: docker://agilepathway/pull-request-label-checker:latest
- with:
- one_of: Doc update required,Doc not needed
- repo_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/docbot.yml b/.github/workflows/docbot.yml
new file mode 100644
index 000000000000..bd895c53d184
--- /dev/null
+++ b/.github/workflows/docbot.yml
@@ -0,0 +1,22 @@
+name: Follow Up Docs
+on:
+ pull_request_target:
+ types: [opened, edited]
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ docbot:
+ runs-on: ubuntu-20.04
+ timeout-minutes: 10
+ steps:
+ - uses: actions/checkout@v4
+ - uses: ./.github/actions/setup-cyborg
+ - name: Maybe Follow Up Docs Issue
+ working-directory: cyborg
+ run: pnpm tsx bin/follow-up-docs-issue.ts
+ env:
+ DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/schedule.yml b/.github/workflows/schedule.yml
index b8b2dfe418e2..608e56ec0abd 100644
--- a/.github/workflows/schedule.yml
+++ b/.github/workflows/schedule.yml
@@ -16,16 +16,7 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- - uses: actions/setup-node@v4
- with:
- node-version: 22
- - uses: pnpm/action-setup@v3
- with:
- package_json_file: 'cyborg/package.json'
- run_install: true
- - name: Describe the Environment
- working-directory: cyborg
- run: pnpm tsx -v
+ - uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
diff --git a/.github/workflows/semantic-pull-request.yml b/.github/workflows/semantic-pull-request.yml
index f67fb1d4072a..3dae57ff7683 100644
--- a/.github/workflows/semantic-pull-request.yml
+++ b/.github/workflows/semantic-pull-request.yml
@@ -13,16 +13,7 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- - uses: actions/setup-node@v4
- with:
- node-version: 22
- - uses: pnpm/action-setup@v3
- with:
- package_json_file: 'cyborg/package.json'
- run_install: true
- - name: Describe the Environment
- working-directory: cyborg
- run: pnpm tsx -v
+ - uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
diff --git a/cyborg/bin/follow-up-docs-issue.ts b/cyborg/bin/follow-up-docs-issue.ts
new file mode 100644
index 000000000000..e08bfee0834a
--- /dev/null
+++ b/cyborg/bin/follow-up-docs-issue.ts
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2023 Greptime Team
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as core from '@actions/core'
+import {handleError, obtainClient} from "@/common";
+import {context} from "@actions/github";
+import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
+// @ts-expect-error moduleResolution:nodenext issue 54523
+import {RequestError} from "@octokit/request-error";
+
+const needFollowUpDocs = "[x] This PR requires documentation updates."
+const labelDocsNotRequired = "docs-not-required"
+const labelDocsRequired = "docs-required"
+
+async function main() {
+ if (!context.payload.pull_request) {
+ throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
+ }
+
+ const client = obtainClient("GITHUB_TOKEN")
+ const docsClient = obtainClient("DOCS_REPO_TOKEN")
+ const payload = context.payload as PullRequestEvent
+ const { owner, repo, number, actor, title, html_url } = {
+ owner: payload.pull_request.base.user.login,
+ repo: payload.pull_request.base.repo.name,
+ number: payload.pull_request.number,
+ title: payload.pull_request.title,
+ html_url: payload.pull_request.html_url,
+ actor: payload.pull_request.user.login,
+ }
+ const followUpDocs = checkPullRequestEvent(payload)
+ if (followUpDocs) {
+ core.info("Follow up docs.")
+ await client.rest.issues.removeLabel({
+ owner, repo, issue_number: number, name: labelDocsNotRequired,
+ }).catch((e: RequestError) => {
+ if (e.status != 404) {
+ throw e;
+ }
+ core.debug(`Label ${labelDocsNotRequired} not exist.`)
+ })
+ await client.rest.issues.addLabels({
+ owner, repo, issue_number: number, labels: [labelDocsRequired],
+ })
+ await docsClient.rest.issues.create({
+ owner: 'GreptimeTeam',
+ repo: 'docs',
+ title: `Update docs for ${title}`,
+ body: `A document change request is generated from ${html_url}`,
+ assignee: actor,
+ }).then((res) => {
+ core.info(`Created issue ${res.data}`)
+ })
+ } else {
+ core.info("No need to follow up docs.")
+ await client.rest.issues.removeLabel({
+ owner, repo, issue_number: number, name: labelDocsRequired
+ }).catch((e: RequestError) => {
+ if (e.status != 404) {
+ throw e;
+ }
+ core.debug(`Label ${labelDocsRequired} not exist.`)
+ })
+ await client.rest.issues.addLabels({
+ owner, repo, issue_number: number, labels: [labelDocsNotRequired],
+ })
+ }
+}
+
+function checkPullRequestEvent(payload: PullRequestEvent) {
+ switch (payload.action) {
+ case "opened":
+ return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
+ case "edited":
+ return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
+ default:
+ throw new Error(`${payload.action} is unsupported.`)
+ }
+}
+
+function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
+ // @ts-ignore
+ return event.pull_request.body?.includes(needFollowUpDocs)
+}
+
+function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
+ const previous = event.changes.body?.from.includes(needFollowUpDocs)
+ const current = event.pull_request.body?.includes(needFollowUpDocs)
+ // from docs-not-required to docs-required
+ return (!previous) && current
+}
+
+main().catch(handleError)
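The script above only files a docs issue on the transition from unchecked to checked. Below is a Rust rendering of that edge-trigger check, for illustration only; the real implementation is the TypeScript shown above.

// The checkbox marker the workflow looks for in the PR body.
const NEED_FOLLOW_UP_DOCS: &str = "[x] This PR requires documentation updates.";

fn needs_follow_up_on_open(body: Option<&str>) -> bool {
    body.map_or(false, |b| b.contains(NEED_FOLLOW_UP_DOCS))
}

// Only trigger on the edge from "not required" to "required", so re-editing an
// already-marked PR does not create duplicate docs issues.
fn needs_follow_up_on_edit(previous_body: Option<&str>, current_body: Option<&str>) -> bool {
    let previous = previous_body.map_or(false, |b| b.contains(NEED_FOLLOW_UP_DOCS));
    let current = current_body.map_or(false, |b| b.contains(NEED_FOLLOW_UP_DOCS));
    !previous && current
}

fn main() {
    assert!(needs_follow_up_on_open(Some(NEED_FOLLOW_UP_DOCS)));
    assert!(needs_follow_up_on_edit(Some("- [ ] unchecked"), Some(NEED_FOLLOW_UP_DOCS)));
    assert!(!needs_follow_up_on_edit(Some(NEED_FOLLOW_UP_DOCS), Some(NEED_FOLLOW_UP_DOCS)));
    println!("docbot edge-trigger logic behaves as expected");
}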
diff --git a/cyborg/package.json b/cyborg/package.json
index d340a41d3573..742c6f06bf69 100644
--- a/cyborg/package.json
+++ b/cyborg/package.json
@@ -7,6 +7,7 @@
"dependencies": {
"@actions/core": "^1.10.1",
"@actions/github": "^6.0.0",
+ "@octokit/request-error": "^6.1.1",
"@octokit/webhooks-types": "^7.5.1",
"conventional-commit-types": "^3.0.0",
"conventional-commits-parser": "^5.0.0",
diff --git a/cyborg/pnpm-lock.yaml b/cyborg/pnpm-lock.yaml
index 651a3bcbd3d8..5ac1962cedc6 100644
--- a/cyborg/pnpm-lock.yaml
+++ b/cyborg/pnpm-lock.yaml
@@ -11,6 +11,9 @@ dependencies:
'@actions/github':
specifier: ^6.0.0
version: 6.0.0
+ '@octokit/request-error':
+ specifier: ^6.1.1
+ version: 6.1.1
'@octokit/webhooks-types':
specifier: ^7.5.1
version: 7.5.1
@@ -359,6 +362,13 @@ packages:
once: 1.4.0
dev: false
+ /@octokit/[email protected]:
+ resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
+ engines: {node: '>= 18'}
+ dependencies:
+ '@octokit/types': 13.5.0
+ dev: false
+
/@octokit/[email protected]:
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
engines: {node: '>= 18'}
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index a73c7df86698..f74a94c7a14f 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -23,6 +23,7 @@ use arrow_schema::DECIMAL_DEFAULT_SCALE;
use common_decimal::decimal128::DECIMAL128_MAX_PRECISION;
use common_time::interval::IntervalUnit;
use common_time::timestamp::TimeUnit;
+use enum_dispatch::enum_dispatch;
use paste::paste;
use serde::{Deserialize, Serialize};
@@ -41,7 +42,7 @@ use crate::value::Value;
use crate::vectors::MutableVector;
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
-#[enum_dispatch::enum_dispatch(DataType)]
+#[enum_dispatch(DataType)]
pub enum ConcreteDataType {
Null(NullType),
Boolean(BooleanType),
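The import change above relies on enum_dispatch generating the DataType impl for the enum by delegating to each variant's inner type, avoiding dynamic dispatch. A minimal illustration of that pattern with simplified stand-in types (not the real datatypes definitions):

use enum_dispatch::enum_dispatch;

#[enum_dispatch]
trait DataType {
    fn name(&self) -> &'static str;
}

struct NullType;
struct BooleanType;

impl DataType for NullType {
    fn name(&self) -> &'static str { "Null" }
}
impl DataType for BooleanType {
    fn name(&self) -> &'static str { "Boolean" }
}

// enum_dispatch generates `impl DataType for ConcreteDataType` plus `From`
// impls for every variant type.
#[enum_dispatch(DataType)]
enum ConcreteDataType {
    Null(NullType),
    Boolean(BooleanType),
}

fn main() {
    let t: ConcreteDataType = NullType.into();
    assert_eq!(t.name(), "Null");
}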
|
ci
|
implement docbot in cyborg (#3937)
|
3197b8b53562f52b1955bad3571c9d3762593915
|
2024-10-15 15:18:13
|
Weny Xu
|
feat: introduce default customizers (#4831)
| false
|
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 757bb273416e..664066132e50 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -49,8 +49,8 @@ impl Instance {
}
}
- pub fn mut_inner(&mut self) -> &mut MetasrvInstance {
- &mut self.instance
+ pub fn get_inner(&self) -> &MetasrvInstance {
+ &self.instance
}
}
@@ -90,6 +90,14 @@ impl Command {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
self.subcmd.load_options(global_options)
}
+
+ pub fn config_file(&self) -> &Option<String> {
+ self.subcmd.config_file()
+ }
+
+ pub fn env_prefix(&self) -> &String {
+ self.subcmd.env_prefix()
+ }
}
#[derive(Parser)]
@@ -109,6 +117,18 @@ impl SubCommand {
SubCommand::Start(cmd) => cmd.load_options(global_options),
}
}
+
+ fn config_file(&self) -> &Option<String> {
+ match self {
+ SubCommand::Start(cmd) => &cmd.config_file,
+ }
+ }
+
+ fn env_prefix(&self) -> &String {
+ match self {
+ SubCommand::Start(cmd) => &cmd.env_prefix,
+ }
+ }
}
#[derive(Debug, Default, Parser)]
diff --git a/src/common/base/src/plugins.rs b/src/common/base/src/plugins.rs
index 98b40cd059b2..c392422b64d4 100644
--- a/src/common/base/src/plugins.rs
+++ b/src/common/base/src/plugins.rs
@@ -38,6 +38,18 @@ impl Plugins {
self.read().get::<T>().cloned()
}
+ pub fn get_or_insert<T, F>(&self, f: F) -> T
+ where
+ T: 'static + Send + Sync + Clone,
+ F: FnOnce() -> T,
+ {
+ let mut binding = self.write();
+ if !binding.contains::<T>() {
+ binding.insert(f());
+ }
+ binding.get::<T>().cloned().unwrap()
+ }
+
pub fn map_mut<T: 'static + Send + Sync, F, R>(&self, mapper: F) -> R
where
F: FnOnce(Option<&mut T>) -> R,
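A hypothetical call site for the new get_or_insert helper above; MyCustomizer is an illustrative type, not something introduced by this diff.

use std::sync::Arc;

use common_base::Plugins;

// Illustrative plugin value; any 'static + Send + Sync + Clone type works.
#[derive(Clone)]
struct MyCustomizer {
    name: Arc<String>,
}

fn example(plugins: &Plugins) {
    // The closure runs only when no MyCustomizer is stored yet.
    let first = plugins.get_or_insert(|| MyCustomizer {
        name: Arc::new("default".to_string()),
    });
    // A second call returns the stored value; this closure is not evaluated.
    let second = plugins.get_or_insert(|| MyCustomizer {
        name: Arc::new("ignored".to_string()),
    });
    assert_eq!(*first.name, "default");
    assert_eq!(*second.name, "default");
}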
diff --git a/src/common/meta/src/leadership_notifier.rs b/src/common/meta/src/leadership_notifier.rs
index 0fed50d3880d..393bf3cb8201 100644
--- a/src/common/meta/src/leadership_notifier.rs
+++ b/src/common/meta/src/leadership_notifier.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use common_telemetry::{error, info};
@@ -24,6 +24,8 @@ pub type LeadershipChangeNotifierCustomizerRef = Arc<dyn LeadershipChangeNotifie
/// A trait for customizing the leadership change notifier.
pub trait LeadershipChangeNotifierCustomizer: Send + Sync {
fn customize(&self, notifier: &mut LeadershipChangeNotifier);
+
+ fn add_listener(&self, listener: Arc<dyn LeadershipChangeListener>);
}
/// A trait for handling leadership change events in a distributed system.
@@ -45,10 +47,28 @@ pub struct LeadershipChangeNotifier {
listeners: Vec<Arc<dyn LeadershipChangeListener>>,
}
-impl LeadershipChangeNotifierCustomizer for LeadershipChangeNotifier {
+#[derive(Default)]
+pub struct DefaultLeadershipChangeNotifierCustomizer {
+ listeners: Mutex<Vec<Arc<dyn LeadershipChangeListener>>>,
+}
+
+impl DefaultLeadershipChangeNotifierCustomizer {
+ pub fn new() -> Self {
+ Self {
+ listeners: Mutex::new(Vec::new()),
+ }
+ }
+}
+
+impl LeadershipChangeNotifierCustomizer for DefaultLeadershipChangeNotifierCustomizer {
fn customize(&self, notifier: &mut LeadershipChangeNotifier) {
info!("Customizing leadership change notifier");
- notifier.listeners.extend(self.listeners.clone());
+ let listeners = self.listeners.lock().unwrap().clone();
+ notifier.listeners.extend(listeners);
+ }
+
+ fn add_listener(&self, listener: Arc<dyn LeadershipChangeListener>) {
+ self.listeners.lock().unwrap().push(listener);
}
}
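A sketch of wiring the new default customizer, assuming the module path common_meta::leadership_notifier and an already-built Arc<dyn LeadershipChangeListener>; the listener trait's methods are not shown in this hunk, so they stay abstract here.

use std::sync::Arc;

use common_meta::leadership_notifier::{
    DefaultLeadershipChangeNotifierCustomizer, LeadershipChangeListener,
    LeadershipChangeNotifierCustomizer, LeadershipChangeNotifierCustomizerRef,
};

fn build_customizer(
    listener: Arc<dyn LeadershipChangeListener>,
) -> LeadershipChangeNotifierCustomizerRef {
    let customizer = DefaultLeadershipChangeNotifierCustomizer::new();
    // Listeners queued here are appended to the LeadershipChangeNotifier when
    // metasrv later calls `customize` on this customizer.
    customizer.add_listener(listener);
    Arc::new(customizer)
}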
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 79773498cfc8..8323a7f8dfad 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -149,8 +149,8 @@ impl MetasrvInstance {
self.plugins.clone()
}
- pub fn mut_inner(&mut self) -> &mut Metasrv {
- &mut self.metasrv
+ pub fn get_inner(&self) -> &Metasrv {
+ &self.metasrv
}
}
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index ce75ff5ff181..5499cb259b2f 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -14,7 +14,7 @@
use std::collections::{BTreeMap, HashSet};
use std::ops::Range;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use api::v1::meta::mailbox_message::Payload;
@@ -210,7 +210,7 @@ impl Pushers {
}
#[derive(Clone)]
-struct NameCachedHandler {
+pub struct NameCachedHandler {
name: &'static str,
handler: Arc<dyn HeartbeatHandler>,
}
@@ -547,15 +547,27 @@ impl HeartbeatHandlerGroupBuilder {
})
}
+ fn add_handler_after_inner(&mut self, target: &str, handler: NameCachedHandler) -> Result<()> {
+ if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
+ self.handlers.insert(pos + 1, handler);
+ return Ok(());
+ }
+
+ error::HandlerNotFoundSnafu { name: target }.fail()
+ }
+
/// Adds the handler after the specified handler.
pub fn add_handler_after(
&mut self,
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
+ self.add_handler_after_inner(target, NameCachedHandler::new(handler))
+ }
+
+ fn add_handler_before_inner(&mut self, target: &str, handler: NameCachedHandler) -> Result<()> {
if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
- self.handlers
- .insert(pos + 1, NameCachedHandler::new(handler));
+ self.handlers.insert(pos, handler);
return Ok(());
}
@@ -568,8 +580,12 @@ impl HeartbeatHandlerGroupBuilder {
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
+ self.add_handler_before_inner(target, NameCachedHandler::new(handler))
+ }
+
+ fn replace_handler_inner(&mut self, target: &str, handler: NameCachedHandler) -> Result<()> {
if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
- self.handlers.insert(pos, NameCachedHandler::new(handler));
+ self.handlers[pos] = handler;
return Ok(());
}
@@ -582,25 +598,115 @@ impl HeartbeatHandlerGroupBuilder {
target: &'static str,
handler: impl HeartbeatHandler + 'static,
) -> Result<()> {
- if let Some(pos) = self.handlers.iter().position(|x| x.name == target) {
- self.handlers[pos] = NameCachedHandler::new(handler);
- return Ok(());
- }
+ self.replace_handler_inner(target, NameCachedHandler::new(handler))
+ }
- error::HandlerNotFoundSnafu { name: target }.fail()
+ fn add_handler_last_inner(&mut self, handler: NameCachedHandler) {
+ self.handlers.push(handler);
}
fn add_handler_last(&mut self, handler: impl HeartbeatHandler + 'static) {
- self.handlers.push(NameCachedHandler::new(handler));
+ self.add_handler_last_inner(NameCachedHandler::new(handler));
}
}
pub type HeartbeatHandlerGroupBuilderCustomizerRef =
Arc<dyn HeartbeatHandlerGroupBuilderCustomizer>;
+pub enum CustomizeHeartbeatGroupAction {
+ AddHandlerAfter {
+ target: String,
+ handler: NameCachedHandler,
+ },
+ AddHandlerBefore {
+ target: String,
+ handler: NameCachedHandler,
+ },
+ ReplaceHandler {
+ target: String,
+ handler: NameCachedHandler,
+ },
+ AddHandlerLast {
+ handler: NameCachedHandler,
+ },
+}
+
+impl CustomizeHeartbeatGroupAction {
+ pub fn new_add_handler_after(
+ target: &'static str,
+ handler: impl HeartbeatHandler + 'static,
+ ) -> Self {
+ Self::AddHandlerAfter {
+ target: target.to_string(),
+ handler: NameCachedHandler::new(handler),
+ }
+ }
+
+ pub fn new_add_handler_before(
+ target: &'static str,
+ handler: impl HeartbeatHandler + 'static,
+ ) -> Self {
+ Self::AddHandlerBefore {
+ target: target.to_string(),
+ handler: NameCachedHandler::new(handler),
+ }
+ }
+
+ pub fn new_replace_handler(
+ target: &'static str,
+ handler: impl HeartbeatHandler + 'static,
+ ) -> Self {
+ Self::ReplaceHandler {
+ target: target.to_string(),
+ handler: NameCachedHandler::new(handler),
+ }
+ }
+
+ pub fn new_add_handler_last(handler: impl HeartbeatHandler + 'static) -> Self {
+ Self::AddHandlerLast {
+ handler: NameCachedHandler::new(handler),
+ }
+ }
+}
+
/// The customizer of the [`HeartbeatHandlerGroupBuilder`].
pub trait HeartbeatHandlerGroupBuilderCustomizer: Send + Sync {
fn customize(&self, builder: &mut HeartbeatHandlerGroupBuilder) -> Result<()>;
+
+ fn add_action(&self, action: CustomizeHeartbeatGroupAction);
+}
+
+#[derive(Default)]
+pub struct DefaultHeartbeatHandlerGroupBuilderCustomizer {
+ actions: Mutex<Vec<CustomizeHeartbeatGroupAction>>,
+}
+
+impl HeartbeatHandlerGroupBuilderCustomizer for DefaultHeartbeatHandlerGroupBuilderCustomizer {
+ fn customize(&self, builder: &mut HeartbeatHandlerGroupBuilder) -> Result<()> {
+ info!("Customizing the heartbeat handler group builder");
+ let mut actions = self.actions.lock().unwrap();
+ for action in actions.drain(..) {
+ match action {
+ CustomizeHeartbeatGroupAction::AddHandlerAfter { target, handler } => {
+ builder.add_handler_after_inner(&target, handler)?;
+ }
+ CustomizeHeartbeatGroupAction::AddHandlerBefore { target, handler } => {
+ builder.add_handler_before_inner(&target, handler)?;
+ }
+ CustomizeHeartbeatGroupAction::ReplaceHandler { target, handler } => {
+ builder.replace_handler_inner(&target, handler)?;
+ }
+ CustomizeHeartbeatGroupAction::AddHandlerLast { handler } => {
+ builder.add_handler_last_inner(handler);
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn add_action(&self, action: CustomizeHeartbeatGroupAction) {
+ self.actions.lock().unwrap().push(action);
+ }
}
#[cfg(test)]
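A sketch of how an embedding binary could queue customizations with the new action-based customizer, assuming the crate path meta_srv::handler and some handler that implements HeartbeatHandler; the paths and the generic handler parameter are assumptions, not part of this diff.

use meta_srv::handler::filter_inactive_region_stats;
use meta_srv::handler::{
    CustomizeHeartbeatGroupAction, DefaultHeartbeatHandlerGroupBuilderCustomizer,
    HeartbeatHandler, HeartbeatHandlerGroupBuilderCustomizer,
};

fn queue_actions(
    customizer: &DefaultHeartbeatHandlerGroupBuilderCustomizer,
    handler: impl HeartbeatHandler + 'static,
) {
    // The action is recorded now and applied later, when `customize` replays the
    // queued actions against the HeartbeatHandlerGroupBuilder.
    customizer.add_action(CustomizeHeartbeatGroupAction::new_add_handler_after(
        filter_inactive_region_stats::NAME,
        handler,
    ));
}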
diff --git a/src/meta-srv/src/handler/filter_inactive_region_stats.rs b/src/meta-srv/src/handler/filter_inactive_region_stats.rs
index fc1518f8b6dc..8504db7d5b04 100644
--- a/src/meta-srv/src/handler/filter_inactive_region_stats.rs
+++ b/src/meta-srv/src/handler/filter_inactive_region_stats.rs
@@ -20,6 +20,8 @@ use crate::error::Result;
use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
+pub const NAME: &str = "FilterInactiveRegionStatsHandler";
+
pub struct FilterInactiveRegionStatsHandler;
#[async_trait]
|
feat
|
introduce default customizers (#4831)
|
545a80c6e0ce808fa06b68615d2108784b259dd5
|
2024-09-09 17:44:17
|
jeremyhi
|
chore: remove unused method (#4703)
| false
|
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index 6170c5300858..56ed7e5bf02f 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -14,11 +14,10 @@
use api::helper;
use api::v1::column::Values;
-use api::v1::{AddColumns, Column, CreateTableExpr};
+use api::v1::{Column, CreateTableExpr};
use common_base::BitVec;
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::VectorRef;
-use datatypes::schema::SchemaRef;
use snafu::{ensure, ResultExt};
use table::metadata::TableId;
use table::table_reference::TableReference;
@@ -27,11 +26,6 @@ use crate::error::{CreateVectorSnafu, Result, UnexpectedValuesLengthSnafu};
use crate::util;
use crate::util::ColumnExpr;
-pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result<Option<AddColumns>> {
- let column_exprs = ColumnExpr::from_columns(columns);
- util::extract_new_columns(schema, column_exprs)
-}
-
/// Try to build create table request from insert data.
pub fn build_create_expr_from_insertion(
catalog_name: &str,
@@ -114,7 +108,6 @@ mod tests {
use super::*;
use crate::error;
use crate::error::ColumnDataTypeSnafu;
- use crate::insert::find_new_columns;
#[inline]
fn build_column_schema(
@@ -281,11 +274,18 @@ mod tests {
let schema = Arc::new(SchemaBuilder::try_from(columns).unwrap().build().unwrap());
- assert!(find_new_columns(&schema, &[]).unwrap().is_none());
+ assert!(
+ util::extract_new_columns(&schema, ColumnExpr::from_columns(&[]))
+ .unwrap()
+ .is_none()
+ );
let insert_batch = mock_insert_batch();
- let add_columns = find_new_columns(&schema, &insert_batch.0).unwrap().unwrap();
+ let add_columns =
+ util::extract_new_columns(&schema, ColumnExpr::from_columns(&insert_batch.0))
+ .unwrap()
+ .unwrap();
assert_eq!(5, add_columns.add_columns.len());
let host_column = &add_columns.add_columns[0];
diff --git a/src/common/grpc-expr/src/lib.rs b/src/common/grpc-expr/src/lib.rs
index 7a2fea237b52..c8afaf98d9a3 100644
--- a/src/common/grpc-expr/src/lib.rs
+++ b/src/common/grpc-expr/src/lib.rs
@@ -19,4 +19,4 @@ pub mod insert;
pub mod util;
pub use alter::{alter_expr_to_request, create_table_schema};
-pub use insert::{build_create_expr_from_insertion, find_new_columns};
+pub use insert::build_create_expr_from_insertion;
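With find_new_columns removed, callers compose the two helpers directly. A minimal sketch of the replacement call site, assuming the crate is exposed as common_grpc_expr and util::extract_new_columns remains public:

use api::v1::{AddColumns, Column};
use common_grpc_expr::util::{self, ColumnExpr};
use datatypes::schema::SchemaRef;

// Returns the columns present in the insert request but missing from the schema.
fn new_columns(schema: &SchemaRef, columns: &[Column]) -> Option<AddColumns> {
    let column_exprs = ColumnExpr::from_columns(columns);
    util::extract_new_columns(schema, column_exprs).expect("failed to extract new columns")
}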
|
chore
|
remove unused method (#4703)
|
8a86903c73e9ecc3fd6dec29f00c822b1b396e1d
|
2025-03-11 11:46:49
|
Ning Sun
|
feat: add description for each grafana panel (#5673)
| false
|
diff --git a/.github/workflows/grafana.yml b/.github/workflows/grafana.yml
new file mode 100644
index 000000000000..139ea85b0533
--- /dev/null
+++ b/.github/workflows/grafana.yml
@@ -0,0 +1,52 @@
+name: Check Grafana Panels
+
+on:
+ pull_request:
+ branches:
+ - main
+ paths:
+ - 'grafana/**' # Trigger only when files under the grafana/ directory change
+
+jobs:
+ check-panels:
+ runs-on: ubuntu-latest
+
+ steps:
+ # Check out the repository
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ # Install jq (required for the script)
+ - name: Install jq
+ run: sudo apt-get install -y jq
+
+ # Make the check.sh script executable
+ - name: Make check.sh executable
+ run: chmod +x grafana/check.sh
+
+ # Run the check.sh script
+ - name: Run check.sh
+ run: ./grafana/check.sh
+
+ # Only run summary.sh for pull_request events (not for merge queues or final pushes)
+ - name: Check if this is a pull request
+ id: check-pr
+ run: |
+ if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+ echo "is_pull_request=true" >> $GITHUB_OUTPUT
+ else
+ echo "is_pull_request=false" >> $GITHUB_OUTPUT
+ fi
+
+ # Make the summary.sh script executable
+ - name: Make summary.sh executable
+ if: steps.check-pr.outputs.is_pull_request == 'true'
+ run: chmod +x grafana/summary.sh
+
+ # Run the summary.sh script and add its output to the GitHub Job Summary
+ - name: Run summary.sh and add to Job Summary
+ if: steps.check-pr.outputs.is_pull_request == 'true'
+ run: |
+ SUMMARY=$(./grafana/summary.sh)
+ echo "### Summary of Grafana Panels" >> $GITHUB_STEP_SUMMARY
+ echo "$SUMMARY" >> $GITHUB_STEP_SUMMARY
diff --git a/grafana/check.sh b/grafana/check.sh
new file mode 100755
index 000000000000..9cab07391c2a
--- /dev/null
+++ b/grafana/check.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+BASEDIR=$(dirname "$0")
+
+# Use jq to check for panels with empty or missing descriptions
+invalid_panels=$(cat $BASEDIR/greptimedb-cluster.json | jq -r '
+ .panels[]
+ | select((.type == "stats" or .type == "timeseries") and (.description == "" or .description == null))
+')
+
+# Check if any invalid panels were found
+if [[ -n "$invalid_panels" ]]; then
+ echo "Error: The following panels have empty or missing descriptions:"
+ echo "$invalid_panels"
+ exit 1
+else
+ echo "All panels with type 'stats' or 'timeseries' have valid descriptions."
+ exit 0
+fi
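For illustration, the same validation the shell script performs can be expressed in Rust with serde_json; this is only a sketch, the CI job runs check.sh itself.

use serde_json::Value;

// Collect titles of stats/timeseries panels whose description is empty or
// missing, mirroring the jq filter in grafana/check.sh.
fn panels_missing_description(dashboard: &Value) -> Vec<String> {
    let empty = Vec::new();
    let panels = dashboard["panels"].as_array().unwrap_or(&empty);
    panels
        .iter()
        .filter(|p| {
            let ty = p["type"].as_str().unwrap_or("");
            let desc = p["description"].as_str().unwrap_or("");
            (ty == "stats" || ty == "timeseries") && desc.is_empty()
        })
        .map(|p| p["title"].as_str().unwrap_or("<untitled>").to_string())
        .collect()
}

fn main() {
    let json = std::fs::read_to_string("grafana/greptimedb-cluster.json").expect("read dashboard");
    let dashboard: Value = serde_json::from_str(&json).expect("parse dashboard");
    let invalid = panels_missing_description(&dashboard);
    if invalid.is_empty() {
        println!("All panels with type 'stats' or 'timeseries' have valid descriptions.");
    } else {
        eprintln!("Panels with empty or missing descriptions: {invalid:?}");
        std::process::exit(1);
    }
}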
diff --git a/grafana/greptimedb-cluster.json b/grafana/greptimedb-cluster.json
index 983a3d3688e1..dccf793938d8 100644
--- a/grafana/greptimedb-cluster.json
+++ b/grafana/greptimedb-cluster.json
@@ -104,6 +104,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Greptime DB version.",
"fieldConfig": {
"defaults": {
"color": {
@@ -140,7 +141,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "/^pkg_version$/",
"values": false
},
@@ -188,7 +191,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total memory allocated by frontend node. Calculated from jemalloc metrics and may vary from system metrics.",
"fieldConfig": {
"defaults": {
"color": {
@@ -222,7 +225,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -253,7 +258,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
- "description": "",
+ "description": "Total number of active frontend nodes in the cluster.",
"fieldConfig": {
"defaults": {
"color": {
@@ -290,7 +295,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -338,7 +345,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total memory allocated by datanodes. Calculated from jemalloc metrics and may vary from system metrics.",
"fieldConfig": {
"defaults": {
"color": {
@@ -372,7 +379,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -403,7 +412,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
- "description": "",
+ "description": "Total number of active datanodes in the cluster.",
"fieldConfig": {
"defaults": {
"color": {
@@ -440,7 +449,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -488,7 +499,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total memory allocated by metasrv. Calculated from jemalloc metrics and may vary from system metrics.",
"fieldConfig": {
"defaults": {
"color": {
@@ -522,7 +533,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -553,7 +566,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
- "description": "",
+ "description": "Total number of active metasrv instances",
"fieldConfig": {
"defaults": {
"color": {
@@ -590,7 +603,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -638,6 +653,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "User-created database count.",
"fieldConfig": {
"defaults": {
"color": {
@@ -674,7 +690,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -722,6 +740,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Total number of tables.",
"fieldConfig": {
"defaults": {
"color": {
@@ -754,7 +773,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -802,6 +823,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Total size of data files.",
"fieldConfig": {
"defaults": {
"color": {
@@ -835,7 +857,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -883,7 +907,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total number of rows ingested into the cluster, per second.",
"fieldConfig": {
"defaults": {
"color": {
@@ -919,7 +943,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -950,7 +976,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total number of rows ingested via /events/logs endpoint, per second.",
"fieldConfig": {
"defaults": {
"color": {
@@ -986,7 +1012,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -1021,6 +1049,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "The approximate size of write-ahead logs",
"fieldConfig": {
"defaults": {
"color": {
@@ -1054,7 +1083,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -1102,6 +1133,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Total size of index files.",
"fieldConfig": {
"defaults": {
"color": {
@@ -1135,7 +1167,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -1183,6 +1217,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Total size of manifest files. Manifest is our table format metadata stored on object storage.",
"fieldConfig": {
"defaults": {
"color": {
@@ -1216,7 +1251,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -1264,6 +1301,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Total number of partitions in the cluster.",
"fieldConfig": {
"defaults": {
"color": {
@@ -1296,7 +1334,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -1344,6 +1384,7 @@
"type": "mysql",
"uid": "${DS_MYSQL}"
},
+ "description": "Total number of data rows in the cluster. Calculated by sum of rows from each region.",
"fieldConfig": {
"defaults": {
"color": {
@@ -1377,7 +1418,9 @@
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"fields": "",
"values": false
},
@@ -1425,7 +1468,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total rate of data ingestion API calls by protocol.\n\nHere we listed 3 primary protocols:\n\n- Prometheus remote write\n- Greptime's gRPC API (when using our ingest SDK)\n- Log ingestion http API\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -1547,7 +1590,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Total rate of query API calls by protocol. This metric is collected from frontends.\n\nHere we listed 3 main protocols:\n- MySQL\n- Postgres\n- Prometheus API\n\nNote that some other minor query APIs, such as /sql, are not included",
"fieldConfig": {
"defaults": {
"color": {
@@ -1682,7 +1725,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Memory usage information of datanodes.\n\nThere are three types of the metrics:\n\n- allocated from jemalloc\n- resident memory as stat from jemalloc\n- process virtual memory",
"fieldConfig": {
"defaults": {
"color": {
@@ -1695,6 +1738,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1725,8 +1769,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1747,7 +1790,9 @@
"id": 234,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -1807,6 +1852,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Memory usage information of frontend.\n\nThere are three types of the metrics:\n\n- allocated from jemalloc\n- resident memory as stat from jemalloc\n- process virtual memory",
"fieldConfig": {
"defaults": {
"color": {
@@ -1819,6 +1865,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1849,8 +1896,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1871,7 +1917,9 @@
"id": 233,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -1931,6 +1979,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Memory usage information of metasrv.\n\nThere are three types of the metrics:\n\n- allocated from jemalloc\n- resident memory as stat from jemalloc\n- process virtual memory",
"fieldConfig": {
"defaults": {
"color": {
@@ -1943,6 +1992,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1973,8 +2023,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1995,7 +2044,9 @@
"id": 235,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2055,7 +2106,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current memory usage by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -2068,6 +2119,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2098,8 +2150,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2120,7 +2171,9 @@
"id": 256,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2152,7 +2205,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current memory usage by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -2165,6 +2218,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2195,8 +2249,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2217,7 +2270,9 @@
"id": 257,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2249,7 +2304,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current memory usage by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -2262,6 +2317,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2292,8 +2348,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2314,7 +2369,9 @@
"id": 258,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
@@ -2346,7 +2403,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current cpu usage of all instances accumulated",
"fieldConfig": {
"defaults": {
"color": {
@@ -2359,6 +2416,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2389,8 +2447,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2411,7 +2468,9 @@
"id": 259,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2445,7 +2504,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current cpu usage of all instances accumulated",
"fieldConfig": {
"defaults": {
"color": {
@@ -2458,6 +2517,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2488,8 +2548,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2510,7 +2569,9 @@
"id": 260,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2544,7 +2605,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current cpu usage of all instances accumulated",
"fieldConfig": {
"defaults": {
"color": {
@@ -2557,6 +2618,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2587,8 +2649,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2609,7 +2670,9 @@
"id": 261,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2643,7 +2706,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current cpu usage by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -2656,6 +2719,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2686,8 +2750,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2708,7 +2771,9 @@
"id": 262,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2742,7 +2807,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current cpu usage of all instances accumulated",
"fieldConfig": {
"defaults": {
"color": {
@@ -2755,6 +2820,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2785,8 +2851,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2807,7 +2872,9 @@
"id": 263,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2841,7 +2908,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
+ "description": "Current cpu usage of all instances accumulated",
"fieldConfig": {
"defaults": {
"color": {
@@ -2854,6 +2921,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2884,8 +2952,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2906,7 +2973,9 @@
"id": 264,
"options": {
"legend": {
- "calcs": ["lastNotNull"],
+ "calcs": [
+ "lastNotNull"
+ ],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
@@ -2936,7 +3005,7 @@
"type": "timeseries"
},
{
- "collapsed": true,
+ "collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
@@ -2944,3148 +3013,3087 @@
"y": 64
},
"id": 192,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "panels": [],
+ "title": "Frontend APIs",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "HTTP APIs QPS by instance, request url, http method and response status code",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 25
- },
- "id": 202,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "sum by(pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-qps",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "HTTP QPS per Instance",
- "type": "timeseries"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 202,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 25
- },
- "id": 203,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "sum by(pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "P99 latency of HTTP requests by instance, request url, http method and response code",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "HTTP P99 per Instance",
- "type": "timeseries"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 203,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 33
- },
- "id": 211,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{pod=~\"$frontend\",path!~\"/health|/metrics\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "HTTP P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "gRPC requests QPS on frontends by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "sum by(pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{code}}]-qps",
- "range": true,
- "refId": "A"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ],
- "title": "gRPC QPS per Instance",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 33
- },
- "id": 212,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "gRPC P99 per Instance",
- "type": "timeseries"
+ "unit": "ops"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
"legend": false,
"tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "viz": true
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": ["[mycluster-frontend-5c59b4cc9b-kpb6q]-qps"],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
}
]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 41
- },
- "id": 213,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "sum by(pod)(rate(greptime_servers_mysql_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-qps",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "MySQL QPS per Instance",
- "type": "timeseries"
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 73
+ },
+ "id": 211,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 41
- },
- "id": 214,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "editorMode": "code",
+ "expr": "sum by(pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{code}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "gRPC QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "gRPC latency p99 by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_servers_mysql_query_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{ pod }}]-p99",
- "range": true,
- "refId": "A"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ],
- "title": "MySQL P99 per Instance",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "unit": "ops"
- },
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
+ {
+ "color": "red",
+ "value": 80
}
]
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 49
- },
- "id": 215,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "sum by(pod) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-qps",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "PostgreSQL QPS per Instance",
- "type": "timeseries"
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 73
+ },
+ "id": 212,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 49
- },
- "id": 216,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{pod}}]-p99",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "PostgreSQL P99 per Instance",
- "type": "timeseries"
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{path}}]-[{{method}}]-[{{code}}]-p99",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Frontend APIs",
- "type": "row"
+ "title": "gRPC P99 per Instance",
+ "type": "timeseries"
},
{
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 65
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
},
- "id": 217,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "description": "MySQL query rate by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "rowsps"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 24,
- "x": 0,
- "y": 26
- },
- "id": 218,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "sum by(pod)(rate(greptime_table_operator_ingest_rows{pod=~\"$frontend\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-rps",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Ingest Rows per Instance",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
+ "unit": "ops"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "[mycluster-frontend-5c59b4cc9b-kpb6q]-qps"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
"legend": false,
"tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "viz": true
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 32
- },
- "id": 219,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "sum by(pod, request_type) (rate(greptime_grpc_region_request_count{pod=~\"$frontend\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{request_type}}]-qps",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Region Call QPS per Instance",
- "type": "timeseries"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 81
+ },
+ "id": 213,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 32
- },
- "id": 220,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, request_type) (rate(greptime_grpc_region_request_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{request_type}}]-p99",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Region Call P99 per Instance",
- "type": "timeseries"
+ "editorMode": "code",
+ "expr": "sum by(pod)(rate(greptime_servers_mysql_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-qps",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Frontend <-> Datanode",
- "type": "row"
+ "title": "MySQL QPS per Instance",
+ "type": "timeseries"
},
{
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 66
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
},
- "id": 273,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "description": "MySQL query latency p99 by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "fillOpacity": 70,
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineWidth": 0,
- "spanNulls": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 27
- },
- "id": 274,
- "options": {
- "alignValue": "left",
- "legend": {
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "mergeValues": true,
- "rowHeight": 0.9,
- "showValue": "auto",
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "greptime_meta_region_migration_stat{datanode_type=\"src\"}",
- "instant": false,
- "legendFormat": "from-datanode-{{datanode_id}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "greptime_meta_region_migration_stat{datanode_type=\"desc\"}",
- "hide": false,
- "instant": false,
- "legendFormat": "to-datanode-{{datanode_id}}",
- "range": true,
- "refId": "B"
- }
- ],
- "title": "Region migration datanode",
- "type": "state-timeline"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 81
+ },
+ "id": 214,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 27
- },
- "id": 275,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "greptime_meta_region_migration_error",
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Region migration error",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 35
- },
- "id": 276,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "greptime_datanode_load",
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Datanode load",
- "type": "timeseries"
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_servers_mysql_query_elapsed_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{ pod }}]-p99",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Metasrv",
- "type": "row"
+ "title": "MySQL P99 per Instance",
+ "type": "timeseries"
},
{
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 67
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
},
- "id": 194,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "description": "Postgres query rate by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 68
- },
- "id": 201,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "sum by(pod, type) (rate(greptime_mito_handle_request_elapsed_count{pod=~\"$datanode\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{type}}]-qps",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Request QPS per Instance",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
+ "unit": "ops"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "[mycluster-frontend-5f94445cf8-mcmhf]-[/v1/prometheus/write]-[POST]-[204]-qps"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
"legend": false,
"tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "viz": true
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 68
- },
- "id": 222,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{type}}]-p99",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Request P99 per Instance",
- "type": "timeseries"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 89
+ },
+ "id": 215,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 76
- },
- "id": 200,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "sum by(pod) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "PostgreSQL QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Postgres query latency p99 by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "greptime_mito_write_buffer_bytes{pod=~\"$datanode\"}",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Write Buffer per Instance",
- "type": "timeseries"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 89
+ },
+ "id": 216,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 76
- },
- "id": 221,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "sum by(pod) (greptime_mito_write_stall_total{pod=~\"$datanode\"})",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Write Stall per Instance",
- "type": "timeseries"
- },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_servers_postgres_query_elapsed_count{pod=~\"$frontend\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "PostgreSQL P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 97
+ },
+ "id": 217,
+ "panels": [],
+ "title": "Frontend <-> Datanode",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Ingestion rate by row as in each frontend",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "rowsps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 98
+ },
+ "id": 218,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod)(rate(greptime_table_operator_ingest_rows{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-rps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Ingest Rows per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Datanode query rate issued by each frontend",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 104
+ },
+ "id": 219,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod, request_type) (rate(greptime_grpc_region_request_count{pod=~\"$frontend\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{request_type}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Region Call QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Datanode query latency at p99 as seen by each frontend",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 104
+ },
+ "id": 220,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, request_type) (rate(greptime_grpc_region_request_bucket{pod=~\"$frontend\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{request_type}}]-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Region Call P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 112
+ },
+ "id": 273,
+ "panels": [],
+ "title": "Metasrv",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Counter of region migration by source and destination",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "fillOpacity": 70,
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineWidth": 0,
+ "spanNulls": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 113
+ },
+ "id": 274,
+ "options": {
+ "alignValue": "left",
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "mergeValues": true,
+ "rowHeight": 0.9,
+ "showValue": "auto",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "greptime_meta_region_migration_stat{datanode_type=\"src\"}",
+ "instant": false,
+ "legendFormat": "from-datanode-{{datanode_id}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "greptime_meta_region_migration_stat{datanode_type=\"desc\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "to-datanode-{{datanode_id}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Region migration datanode",
+ "type": "state-timeline"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Counter of region migration error",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 113
+ },
+ "id": 275,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "greptime_meta_region_migration_error",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Region migration error",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Gauge of load information of each datanode, collected via heartbeat between datanode and metasrv. This information is for metasrv to schedule workloads.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 121
+ },
+ "id": 276,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "greptime_datanode_load",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Datanode load",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 129
+ },
+ "id": 194,
+ "panels": [],
+ "title": "Mito Engine",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Datanode storage engine QPS by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 130
+ },
+ "id": 201,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod, type) (rate(greptime_mito_handle_request_elapsed_count{pod=~\"$datanode\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{type}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Request QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Storage query latency at p99 by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 130
+ },
+ "id": 222,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{type}}]-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Request P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Memtable size on each instance.\n\nThe memtable holds unflushed data in memory and will flush it to object storage periodically or when size exceed configured limit.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 138
+ },
+ "id": 200,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "greptime_mito_write_buffer_bytes{pod=~\"$datanode\"}",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Write Buffer per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Current counts for stalled write requests by instance\n\nWrite stalls when memtable is full and pending for flush\n\n",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 138
+ },
+ "id": 221,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod) (greptime_mito_write_stall_total{pod=~\"$datanode\"})",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Write Stall per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Memtable flush rate by reason and instance.\n\nThere are several reasons when memtable get flushed. For example, it's full as in size, or reaching the time-to-flush, or by an artificial request.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 146
+ },
+ "id": 224,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "editorMode": "code",
+ "expr": "sum by(pod, reason) (rate(greptime_mito_flush_requests_total{pod=~\"$datanode\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{reason}}]-success",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Flush QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Cache size by instance.\n",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "unit": "ops"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 84
- },
- "id": 224,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 146
+ },
+ "id": 229,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "greptime_mito_cache_bytes{pod=~\"$datanode\"}",
+ "instant": false,
+ "legendFormat": "{{pod}}-{{type}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Cached Bytes per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Read QPS from the storage engine by instance.\n\n",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "sum by(pod, reason) (rate(greptime_mito_flush_requests_total{pod=~\"$datanode\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "[{{pod}}]-[{{reason}}]-success",
- "range": true,
- "refId": "A"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 154
+ },
+ "id": 227,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod) (rate(greptime_mito_read_stage_elapsed_count{pod=~\"$datanode\", stage=\"total\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{pod}}-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Read Stage QPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "P99 latency of each type of reads by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 154
+ },
+ "id": 228,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Flush QPS per Instance",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 84
- },
- "id": 229,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{pod}}-{{stage}}-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Read Stage P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Compaction operation rate.\n\nCompaction happens when storage to merge and optimise data files.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "greptime_mito_cache_bytes{pod=~\"$datanode\"}",
- "instant": false,
- "legendFormat": "{{pod}}-{{type}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Cached Bytes per Instance",
- "type": "timeseries"
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 162
+ },
+ "id": 231,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 92
- },
- "id": 227,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "sum by(pod) (rate(greptime_mito_compaction_total_elapsed_count{pod=~\"$datanode\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Compaction OPS per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Latency of compaction task, at p99",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "sum by(pod) (rate(greptime_mito_read_stage_elapsed_count{pod=~\"$datanode\", stage=\"total\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "{{pod}}-p99",
- "range": true,
- "refId": "A"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 162
+ },
+ "id": 230,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Read Stage QPS per Instance",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 92
- },
- "id": 228,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Last *",
- "sortDesc": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-compaction-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Compaction P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Write latency by instance and stage type",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{pod}}-{{stage}}-p99",
- "range": true,
- "refId": "A"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 170
+ },
+ "id": 225,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Read Stage P99 per Instance",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 100
- },
- "id": 231,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{pod}}-{{stage}}-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Write Stage P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Compaction latency by stage",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "sum by(pod) (rate(greptime_mito_compaction_total_elapsed_count{pod=~\"$datanode\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ],
- "title": "Compaction OPS per Instance",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 100
- },
- "id": 230,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "[{{pod}}]-compaction-p99",
- "range": true,
- "refId": "A"
- }
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 170
+ },
+ "id": 232,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Compaction P99 per Instance",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 108
- },
- "id": 225,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Last *",
- "sortDesc": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{pod}}-{{stage}}-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Compaction P99 per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Write-ahead logs write size as bytes. This chart includes stats of p95 and p99 size by instance, total WAL write rate.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{pod}}-{{stage}}-p99",
- "range": true,
- "refId": "A"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ],
- "title": "Write Stage P99 per Instance",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 108
- },
- "id": 232,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{pod}}-{{stage}}-p99",
- "range": true,
- "refId": "A"
- }
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 178
+ },
+ "id": 268,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Compaction P99 per Instance",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "bytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 116
- },
- "id": 268,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum by(le,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{pod}}-req-size-p95",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.95, sum by(le,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{pod}}-req-size-p95",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
- "hide": false,
- "instant": false,
- "legendFormat": "{{pod}}-req-size-p99",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "rate(raft_engine_write_size_sum[$__rate_interval])",
- "hide": false,
- "instant": false,
- "legendFormat": "{{pod}}-throughput",
- "range": true,
- "refId": "C"
- }
- ],
- "title": "WAL write size",
- "type": "timeseries"
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le,pod) (rate(raft_engine_write_size_bucket[$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{pod}}-req-size-p99",
+ "range": true,
+ "refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 116
- },
- "id": 269,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "rate(raft_engine_write_size_sum[$__rate_interval])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{pod}}-throughput",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "WAL write size",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Write-ahead log operations latency at p99",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le,logstore,optype,pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{pod}}-{{logstore}}-{{optype}}-p99",
- "range": true,
- "refId": "A"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 178
+ },
+ "id": 269,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Log Store op duration seconds",
- "type": "timeseries"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 124
- },
- "id": 270,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le,logstore,optype,pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{pod}}-{{logstore}}-{{optype}}-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Log Store op duration seconds",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Raft engine (local disk) log store sync latency, p99",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "pluginVersion": "11.1.3",
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le, type, node, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{pod}}-p99",
- "range": true,
- "refId": "A"
- }
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 186
+ },
+ "id": 270,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "WAL sync duration seconds",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.1.3",
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 124
- },
- "id": 271,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le, type, node, pod) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{pod}}-p99",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "WAL sync duration seconds",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Ongoing compaction task count",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "pluginVersion": "11.1.3",
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "greptime_mito_inflight_compaction_count",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- }
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 186
+ },
+ "id": 271,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
],
- "title": "Inflight Compaction",
- "type": "timeseries"
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.1.3",
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 132
- },
- "id": 272,
- "options": {
- "legend": {
- "calcs": ["lastNotNull"],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "11.1.3",
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "greptime_mito_inflight_flush_count",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Inflight Flush",
- "type": "timeseries"
+ "editorMode": "code",
+ "expr": "greptime_mito_inflight_compaction_count",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Mito Engine",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 68
- },
- "id": 195,
- "panels": [],
- "title": "OpenDAL",
- "type": "row"
+ "title": "Inflight Compaction",
+ "type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Ongoing flush task count",
"fieldConfig": {
"defaults": {
"color": {
@@ -6098,7 +6106,8 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "line",
+ "barWidthFactor": 0.6,
+ "drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
@@ -6128,8 +6137,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6137,21 +6145,23 @@
}
]
},
- "unit": "ops"
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 10,
- "w": 24,
+ "h": 8,
+ "w": 12,
"x": 0,
- "y": 69
+ "y": 194
},
- "id": 209,
+ "id": 272,
"options": {
"legend": {
- "calcs": [],
- "displayMode": "table",
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "list",
"placement": "bottom",
"showLegend": true
},
@@ -6160,32 +6170,44 @@
"sort": "none"
}
},
+ "pluginVersion": "11.1.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "sum by(pod, scheme, operation) (rate(opendal_operation_bytes_count{pod=~\"$datanode\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
+ "editorMode": "code",
+ "expr": "greptime_mito_inflight_flush_count",
"instant": false,
- "legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
+ "legendFormat": "{{pod}}",
"range": true,
- "refId": "A",
- "useBackend": false
+ "refId": "A"
}
],
- "title": "QPS per Instance",
+ "title": "Inflight Flush",
"type": "timeseries"
},
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 202
+ },
+ "id": 195,
+ "panels": [],
+ "title": "OpenDAL",
+ "type": "row"
+ },
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "object storage query rate by datanode and operation type",
"fieldConfig": {
"defaults": {
"color": {
@@ -6198,6 +6220,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6228,8 +6251,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6245,13 +6267,13 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 79
+ "y": 203
},
- "id": 196,
+ "id": 209,
"options": {
"legend": {
"calcs": [],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -6267,25 +6289,27 @@
"uid": "${DS_PROMETHEUS}"
},
"disableTextWrap": false,
- "editorMode": "builder",
- "expr": "sum by(pod, scheme) (rate(opendal_operation_bytes_count{pod=~\"$datanode\", operation=\"Reader::read\"}[$__rate_interval]))",
+ "editorMode": "code",
+ "expr": "sum by(pod, scheme, operation) (rate(opendal_operation_bytes_count{pod=~\"$datanode\"}[$__rate_interval]))",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
- "legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
+ "legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
"range": true,
"refId": "A",
"useBackend": false
}
],
- "title": "Read QPS per Instance",
+ "title": "QPS per Instance",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Total traffic as in bytes by instance and operation",
"fieldConfig": {
"defaults": {
"color": {
@@ -6298,6 +6322,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6328,8 +6353,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6346,7 +6370,9 @@
"id": "byNames",
"options": {
"mode": "exclude",
- "names": ["[mycluster-datanode-0]-[fs]-[Writer::write]"],
+ "names": [
+ "[mycluster-datanode-0]-[fs]-[Writer::write]"
+ ],
"prefix": "All except:",
"readOnly": true
}
@@ -6368,7 +6394,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 79
+ "y": 203
},
"id": 267,
"options": {
@@ -6390,7 +6416,7 @@
"uid": "${DS_PROMETHEUS}"
},
"disableTextWrap": false,
- "editorMode": "builder",
+ "editorMode": "code",
"expr": "sum by(pod, scheme, operation) (rate(opendal_operation_bytes_sum{pod=~\"$datanode\"}[$__rate_interval]))",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -6409,6 +6435,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Object storage read traffic rate as in bytes per second by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -6421,6 +6448,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6451,8 +6479,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6468,13 +6495,13 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 86
+ "y": 210
},
- "id": 199,
+ "id": 196,
"options": {
"legend": {
"calcs": [],
- "displayMode": "table",
+ "displayMode": "list",
"placement": "bottom",
"showLegend": true
},
@@ -6491,7 +6518,7 @@
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"Writer::write\"}[$__rate_interval]))",
+ "expr": "sum by(pod, scheme) (rate(opendal_operation_bytes_count{pod=~\"$datanode\", operation=\"Reader::read\"}[$__rate_interval]))",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
@@ -6501,7 +6528,7 @@
"useBackend": false
}
],
- "title": "Write QPS per Instance",
+ "title": "Read QPS per Instance",
"type": "timeseries"
},
{
@@ -6509,6 +6536,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Read operation latency at p99",
"fieldConfig": {
"defaults": {
"color": {
@@ -6521,6 +6549,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6551,8 +6580,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6568,7 +6596,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 86
+ "y": 210
},
"id": 198,
"options": {
@@ -6609,6 +6637,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Object storage write traffic rate as in bytes per second by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -6621,6 +6650,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6651,8 +6681,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6668,9 +6697,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 93
+ "y": 217
},
- "id": 205,
+ "id": 199,
"options": {
"legend": {
"calcs": [],
@@ -6690,19 +6719,18 @@
"uid": "${DS_PROMETHEUS}"
},
"disableTextWrap": false,
- "editorMode": "code",
- "expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
+ "editorMode": "builder",
+ "expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"Writer::write\"}[$__rate_interval]))",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
- "interval": "",
"legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
"range": true,
"refId": "A",
"useBackend": false
}
],
- "title": "List QPS per Instance",
+ "title": "Write QPS per Instance",
"type": "timeseries"
},
{
@@ -6710,6 +6738,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Write operation latency at p99",
"fieldConfig": {
"defaults": {
"color": {
@@ -6722,6 +6751,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6752,8 +6782,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6769,7 +6798,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 93
+ "y": 217
},
"id": 204,
"options": {
@@ -6806,6 +6835,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Object storage list traffic rate as in bytes per second by instance",
"fieldConfig": {
"defaults": {
"color": {
@@ -6818,6 +6848,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6848,8 +6879,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6865,9 +6895,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 100
+ "y": 224
},
- "id": 207,
+ "id": 205,
"options": {
"legend": {
"calcs": [],
@@ -6886,15 +6916,20 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "disableTextWrap": false,
"editorMode": "code",
- "expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
+ "expr": "sum by(pod, scheme) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\", operation=\"list\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
"instant": false,
- "legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
+ "interval": "",
+ "legendFormat": "[{{pod}}]-[{{scheme}}]-qps",
"range": true,
- "refId": "A"
+ "refId": "A",
+ "useBackend": false
}
],
- "title": "Other Requests per Instance",
+ "title": "List QPS per Instance",
"type": "timeseries"
},
{
@@ -6902,6 +6937,7 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "List operation latency at p99",
"fieldConfig": {
"defaults": {
"color": {
@@ -6914,6 +6950,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
@@ -6944,8 +6981,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6961,7 +6997,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 100
+ "y": 224
},
"id": 206,
"options": {
@@ -6999,6 +7035,104 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "description": "Object storage traffic rate other than read/write/list/stat as in bytes per second by instance",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 231
+ },
+ "id": 207,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod, scheme, operation) (rate(opendal_operation_duration_seconds_count{pod=~\"$datanode\",operation!~\"read|write|list|stat\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "[{{pod}}]-[{{scheme}}]-[{{operation}}]-qps",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Other Requests per Instance",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "All other operation latency at p99",
"fieldConfig": {
"defaults": {
"color": {
@@ -7011,6 +7145,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
@@ -7041,8 +7176,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -7058,7 +7192,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 107
+ "y": 231
},
"id": 210,
"options": {
@@ -7097,7 +7231,11 @@
"templating": {
"list": [
{
- "current": {},
+ "current": {
+ "selected": false,
+ "text": "No data sources found",
+ "value": ""
+ },
"hide": 0,
"includeAll": false,
"multi": false,
@@ -7111,7 +7249,11 @@
"type": "datasource"
},
{
- "current": {},
+ "current": {
+ "selected": false,
+ "text": "No data sources found",
+ "value": ""
+ },
"hide": 0,
"includeAll": false,
"multi": false,
@@ -7272,6 +7414,6 @@
"timezone": "",
"title": "GreptimeDB Cluster Metrics",
"uid": "ce3q6xwn3xa0qs",
- "version": 9,
+ "version": 10,
"weekStart": ""
}
diff --git a/grafana/summary.sh b/grafana/summary.sh
new file mode 100755
index 000000000000..4e63fd3bd7f8
--- /dev/null
+++ b/grafana/summary.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+BASEDIR=$(dirname "$0")
+echo '| Title | Description | Expressions |
+|---|---|---|'
+
+cat $BASEDIR/greptimedb-cluster.json | jq -r '
+ .panels |
+ map(select(.type == "stat" or .type == "timeseries")) |
+ .[] | "| \(.title) | \(.description | gsub("\n"; "<br>")) | \(.targets | map(.expr // .rawSql | "`\(.|gsub("\n"; "<br>"))`") | join("<br>")) |"
+'
|
feat
|
add description for each grafana panel (#5673)
|
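The grafana/summary.sh script added in the diff above derives a markdown table of panel titles, descriptions, and query expressions from each top-level stat and timeseries panel in greptimedb-cluster.json using jq. A minimal usage sketch, assuming jq is installed and redirecting the generated table to a hypothetical grafana/dashboards.md file (the output path is an assumption, not part of the commit):

    # regenerate the panel summary after editing the dashboard JSON
    ./grafana/summary.sh > grafana/dashboards.md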
4052563248c1473bbe6d396b7fd739bd2543d332
|
2022-12-02 18:09:53
|
Lei, HUANG
|
fix: pr template task default state (#687)
| false
|
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 53629f24d0af..1c62e4ad4105 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -13,7 +13,7 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist
-- [] I have written the necessary rustdoc comments.
-- [] I have added the necessary unit tests and integration tests.
+- [ ] I have written the necessary rustdoc comments.
+- [ ] I have added the necessary unit tests and integration tests.
## Refer to a related PR or issue link (optional)
|
fix
|
pr template task default state (#687)
|
912341e4fa5e055f26a5828cb3928eb1fe617a02
|
2023-09-12 18:27:15
|
Weny Xu
|
fix: fix start issues under standalone mode (#2352)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index a9a627e712aa..9edc6dcc7045 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1859,6 +1859,7 @@ dependencies = [
"humantime-serde",
"hyper",
"lazy_static",
+ "metrics",
"prost",
"regex",
"serde",
@@ -7284,8 +7285,7 @@ dependencies = [
[[package]]
name = "raft-engine"
version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e02bdc8cba47cb7062b433f56700a8edbc9fcd6d706389120d20aa1827e5ba7b"
+source = "git+https://github.com/tikv/raft-engine.git?rev=571462e36621407b9920465a1a15b8b01b929a7f#571462e36621407b9920465a1a15b8b01b929a7f"
dependencies = [
"byteorder",
"crc32fast",
diff --git a/Cargo.toml b/Cargo.toml
index cbc256cf06cb..c26db0e06854 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -153,7 +153,8 @@ object-store = { path = "src/object-store" }
partition = { path = "src/partition" }
promql = { path = "src/promql" }
query = { path = "src/query" }
-raft-engine = { version = "0.4" }
+# TODO(weny): waits for https://github.com/tikv/raft-engine/pull/335
+raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "571462e36621407b9920465a1a15b8b01b929a7f" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 3c55361e40b1..a8cfb26d18e6 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -23,6 +23,12 @@ use snafu::{Location, Snafu};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
+ #[snafu(display("Failed to create default catalog and schema, source: {}", source))]
+ InitMetadata {
+ location: Location,
+ source: common_meta::error::Error,
+ },
+
#[snafu(display("Failed to iter stream, source: {}", source))]
IterStream {
location: Location,
@@ -182,7 +188,9 @@ impl ErrorExt for Error {
Error::ShutdownMetaServer { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
- Error::IterStream { source, .. } => source.status_code(),
+ Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
+ source.status_code()
+ }
Error::MissingConfig { .. }
| Error::LoadLayeredConfig { .. }
| Error::IllegalConfig { .. }
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index a0ca94674133..15cb26c3a932 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -31,7 +31,6 @@ use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDat
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
-use query::QueryEngineRef;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -39,8 +38,8 @@ use servers::Mode;
use snafu::ResultExt;
use crate::error::{
- IllegalConfigSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
- StartFrontendSnafu,
+ IllegalConfigSnafu, InitMetadataSnafu, Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu,
+ StartDatanodeSnafu, StartFrontendSnafu,
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};
@@ -318,13 +317,18 @@ impl StartCommand {
Arc::new(StandaloneDatanodeManager(region_server.clone())),
));
+ catalog_manager
+ .table_metadata_manager_ref()
+ .init()
+ .await
+ .context(InitMetadataSnafu)?;
+
// TODO: build frontend instance like in distributed mode
let mut frontend = build_frontend(
plugins,
kv_store,
procedure_manager,
catalog_manager,
- datanode.query_engine(),
region_server,
)
.await?;
@@ -344,19 +348,17 @@ async fn build_frontend(
kv_store: KvBackendRef,
procedure_manager: ProcedureManagerRef,
catalog_manager: CatalogManagerRef,
- query_engine: QueryEngineRef,
region_server: RegionServer,
) -> Result<FeInstance> {
- let mut frontend_instance = FeInstance::try_new_standalone(
+ let frontend_instance = FeInstance::try_new_standalone(
kv_store,
procedure_manager,
catalog_manager,
- query_engine,
+ plugins,
region_server,
)
.await
.context(StartFrontendSnafu)?;
- frontend_instance.set_plugins(plugins.clone());
Ok(frontend_instance)
}
diff --git a/src/common/meta/Cargo.toml b/src/common/meta/Cargo.toml
index 2bd4a56ab49a..566e49f05bea 100644
--- a/src/common/meta/Cargo.toml
+++ b/src/common/meta/Cargo.toml
@@ -22,6 +22,7 @@ etcd-client.workspace = true
futures.workspace = true
humantime-serde.workspace = true
lazy_static.workspace = true
+metrics.workspace = true
prost.workspace = true
regex.workspace = true
serde.workspace = true
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index b4787611d71e..dce827bf7cfc 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -360,9 +360,7 @@ impl DdlTaskExecutor for DdlManager {
ctx: &ExecutorContext,
request: SubmitDdlTaskRequest,
) -> Result<SubmitDdlTaskResponse> {
- let cluster_id = ctx.cluster_id.context(error::UnexpectedSnafu {
- err_msg: "cluster_id not found",
- })?;
+ let cluster_id = ctx.cluster_id.unwrap_or_default();
info!("Submitting Ddl task: {:?}", request.task);
match request.task {
CreateTable(create_table_task) => {
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index e8f67e7d2da7..6425cb6e3454 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -58,6 +58,7 @@ pub mod table_route;
use std::collections::BTreeMap;
use std::sync::Arc;
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datanode_table::{DatanodeTableKey, DatanodeTableManager, DatanodeTableValue};
use lazy_static::lazy_static;
use regex::Regex;
@@ -67,8 +68,8 @@ use table::metadata::{RawTableInfo, TableId};
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
use table_name::{TableNameKey, TableNameManager, TableNameValue};
-use self::catalog_name::{CatalogManager, CatalogNameValue};
-use self::schema_name::{SchemaManager, SchemaNameValue};
+use self::catalog_name::{CatalogManager, CatalogNameKey, CatalogNameValue};
+use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
use self::table_route::{TableRouteManager, TableRouteValue};
use crate::error::{self, Result, SerdeJsonSnafu};
use crate::kv_backend::txn::Txn;
@@ -165,6 +166,19 @@ impl TableMetadataManager {
}
}
+ pub async fn init(&self) -> Result<()> {
+ let catalog_name = CatalogNameKey::new(DEFAULT_CATALOG_NAME);
+ if !self.catalog_manager().exist(catalog_name).await? {
+ self.catalog_manager().create(catalog_name).await?;
+ }
+ let schema_name = SchemaNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
+ if !self.schema_manager().exist(schema_name).await? {
+ self.schema_manager().create(schema_name, None).await?;
+ }
+
+ Ok(())
+ }
+
pub fn table_name_manager(&self) -> &TableNameManager {
&self.table_name_manager
}
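Note: the init() added above is an idempotent bootstrap step: it only writes the default catalog/schema keys when they are missing, so it is safe to call on every standalone or metasrv start. Below is a minimal, self-contained sketch of that pattern; the in-memory map and the key prefixes are stand-ins for the real KvBackend and key layout, not GreptimeDB's actual API.

use std::collections::HashMap;

struct KvStore {
    data: HashMap<String, Vec<u8>>,
}

impl KvStore {
    fn new() -> Self {
        Self { data: HashMap::new() }
    }

    fn exists(&self, key: &str) -> bool {
        self.data.contains_key(key)
    }

    fn put(&mut self, key: &str, value: Vec<u8>) {
        self.data.insert(key.to_string(), value);
    }
}

const DEFAULT_CATALOG: &str = "greptime";
const DEFAULT_SCHEMA: &str = "public";

/// Creates the default catalog and schema entries only if they do not exist yet.
fn init_default_metadata(store: &mut KvStore) {
    let catalog_key = format!("__catalog_name/{DEFAULT_CATALOG}");
    if !store.exists(&catalog_key) {
        store.put(&catalog_key, b"{}".to_vec());
    }

    let schema_key = format!("__schema_name/{DEFAULT_CATALOG}/{DEFAULT_SCHEMA}");
    if !store.exists(&schema_key) {
        store.put(&schema_key, b"{}".to_vec());
    }
}

fn main() {
    let mut store = KvStore::new();
    // Running init twice must be harmless, mirroring repeated process restarts.
    init_default_metadata(&mut store);
    init_default_metadata(&mut store);
    assert!(store.exists("__catalog_name/greptime"));
    assert!(store.exists("__schema_name/greptime/public"));
}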
diff --git a/src/common/meta/src/key/catalog_name.rs b/src/common/meta/src/key/catalog_name.rs
index c6c727c0af39..6debb5af9356 100644
--- a/src/common/meta/src/key/catalog_name.rs
+++ b/src/common/meta/src/key/catalog_name.rs
@@ -16,8 +16,10 @@ use std::fmt::Display;
use std::sync::Arc;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
+use common_telemetry::timer;
use futures::stream::BoxStream;
use futures::StreamExt;
+use metrics::increment_counter;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -103,11 +105,13 @@ impl CatalogManager {
/// Creates `CatalogNameKey`.
pub async fn create(&self, catalog: CatalogNameKey<'_>) -> Result<()> {
let raw_key = catalog.as_raw_key();
+ let _timer = timer!(crate::metrics::METRIC_META_CREATE_CATALOG);
let req = PutRequest::new()
.with_key(raw_key)
.with_value(CatalogNameValue.try_as_raw_value()?);
self.kv_backend.put(req).await?;
+ increment_counter!(crate::metrics::METRIC_META_CREATE_CATALOG);
Ok(())
}
diff --git a/src/common/meta/src/key/schema_name.rs b/src/common/meta/src/key/schema_name.rs
index 5da6bdb2d38b..f923e5818e30 100644
--- a/src/common/meta/src/key/schema_name.rs
+++ b/src/common/meta/src/key/schema_name.rs
@@ -18,9 +18,11 @@ use std::sync::Arc;
use std::time::Duration;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_telemetry::timer;
use futures::stream::BoxStream;
use futures::StreamExt;
use humantime_serde::re::humantime;
+use metrics::increment_counter;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -143,12 +145,15 @@ impl SchemaManager {
schema: SchemaNameKey<'_>,
value: Option<SchemaNameValue>,
) -> Result<()> {
+ let _timer = timer!(crate::metrics::METRIC_META_CREATE_SCHEMA);
+
let raw_key = schema.as_raw_key();
let req = PutRequest::new()
.with_key(raw_key)
.with_value(value.unwrap_or_default().try_as_raw_value()?);
self.kv_backend.put(req).await?;
+ increment_counter!(crate::metrics::METRIC_META_CREATE_SCHEMA);
Ok(())
}
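Note: the two hunks above start a scope timer at the top of create() and bump a counter only after the KV put succeeds. The sketch below shows that timer-guard-plus-counter shape in standalone form; the printing sink is a stand-in for the real metrics/common_telemetry macros, and the metric names are copied from the diff for illustration only.

use std::time::Instant;

struct ScopeTimer {
    name: &'static str,
    start: Instant,
}

impl ScopeTimer {
    fn new(name: &'static str) -> Self {
        Self { name, start: Instant::now() }
    }
}

impl Drop for ScopeTimer {
    fn drop(&mut self) {
        // The real code reports to the metrics backend instead of printing.
        println!("{} took {:?}", self.name, self.start.elapsed());
    }
}

fn increment_counter(name: &str) {
    println!("counter {name} += 1");
}

fn create_schema() -> Result<(), String> {
    // Timer guard covers the whole operation and records on drop.
    let _timer = ScopeTimer::new("meta.create_schema");
    // ... perform the KV put here ...
    // Only bump the counter once the operation succeeded.
    increment_counter("meta.create_schema");
    Ok(())
}

fn main() {
    create_schema().unwrap();
}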
diff --git a/src/common/meta/src/metrics.rs b/src/common/meta/src/metrics.rs
index ed672a772133..49535607af72 100644
--- a/src/common/meta/src/metrics.rs
+++ b/src/common/meta/src/metrics.rs
@@ -14,6 +14,8 @@
pub const METRIC_META_TXN_REQUEST: &str = "meta.txn_request";
+pub(crate) const METRIC_META_CREATE_CATALOG: &str = "meta.create_catalog";
+pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
pub(crate) const METRIC_META_PROCEDURE_CREATE_TABLE: &str = "meta.procedure.create_table";
pub(crate) const METRIC_META_PROCEDURE_DROP_TABLE: &str = "meta.procedure.drop_table";
pub(crate) const METRIC_META_PROCEDURE_ALTER_TABLE: &str = "meta.procedure.alter_table";
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 1d1e02fa18e2..d15d7cf7a60d 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -159,8 +159,12 @@ impl Instance {
meta_backend.clone(),
datanode_clients.clone(),
));
+ let partition_manager = Arc::new(PartitionRuleManager::new(meta_backend.clone()));
- let region_request_handler = DistRegionRequestHandler::arc(catalog_manager.clone());
+ let region_request_handler = DistRegionRequestHandler::arc(
+ partition_manager.clone(),
+ catalog_manager.datanode_manager().clone(),
+ );
let query_engine = QueryEngineFactory::new_with_plugins(
catalog_manager.clone(),
@@ -170,8 +174,6 @@ impl Instance {
)
.query_engine();
- let partition_manager = Arc::new(PartitionRuleManager::new(meta_backend.clone()));
-
let inserter = Arc::new(Inserter::new(
catalog_manager.clone(),
partition_manager.clone(),
@@ -295,15 +297,28 @@ impl Instance {
kv_backend: KvBackendRef,
procedure_manager: ProcedureManagerRef,
catalog_manager: CatalogManagerRef,
- query_engine: QueryEngineRef,
+ plugins: Arc<Plugins>,
region_server: RegionServer,
) -> Result<Self> {
+ let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
+ let datanode_manager = Arc::new(StandaloneDatanodeManager(region_server));
+
+ let region_request_handler =
+ DistRegionRequestHandler::arc(partition_manager.clone(), datanode_manager.clone());
+
+ let query_engine = QueryEngineFactory::new_with_plugins(
+ catalog_manager.clone(),
+ Some(region_request_handler),
+ true,
+ plugins.clone(),
+ )
+ .query_engine();
+
let script_executor =
Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
- let datanode_manager = Arc::new(StandaloneDatanodeManager(region_server));
let cache_invalidator = Arc::new(DummyCacheInvalidator);
let ddl_executor = Arc::new(DdlManager::new(
procedure_manager,
@@ -341,7 +356,7 @@ impl Instance {
script_executor,
statement_executor,
query_engine,
- plugins: Default::default(),
+ plugins,
servers: Arc::new(HashMap::new()),
heartbeat_task: None,
inserter,
@@ -360,10 +375,6 @@ impl Instance {
&self.catalog_manager
}
- pub fn set_plugins(&mut self, map: Arc<Plugins>) {
- self.plugins = map;
- }
-
pub fn plugins(&self) -> Arc<Plugins> {
self.plugins.clone()
}
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 4a99b9d2ef19..a14264a3ee36 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -19,20 +19,28 @@ use async_trait::async_trait;
use client::error::{HandleRequestSnafu, Result as ClientResult};
use client::region_handler::RegionRequestHandler;
use common_error::ext::BoxedError;
+use common_meta::datanode_manager::DatanodeManagerRef;
use common_recordbatch::SendableRecordBatchStream;
+use partition::manager::PartitionRuleManagerRef;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
-use crate::catalog::FrontendCatalogManager;
use crate::error::{FindDatanodeSnafu, FindTableRouteSnafu, RequestQuerySnafu, Result};
pub(crate) struct DistRegionRequestHandler {
- catalog_manager: Arc<FrontendCatalogManager>,
+ partition_manager: PartitionRuleManagerRef,
+ datanode_manager: DatanodeManagerRef,
}
impl DistRegionRequestHandler {
- pub fn arc(catalog_manager: Arc<FrontendCatalogManager>) -> Arc<Self> {
- Arc::new(Self { catalog_manager })
+ pub fn arc(
+ partition_manager: PartitionRuleManagerRef,
+ datanode_manager: DatanodeManagerRef,
+ ) -> Arc<Self> {
+ Arc::new(Self {
+ partition_manager,
+ datanode_manager,
+ })
}
}
@@ -51,8 +59,7 @@ impl DistRegionRequestHandler {
let region_id = RegionId::from_u64(request.region_id);
let table_route = self
- .catalog_manager
- .partition_manager()
+ .partition_manager
.find_table_route(region_id.table_id())
.await
.context(FindTableRouteSnafu {
@@ -64,7 +71,7 @@ impl DistRegionRequestHandler {
region: region_id.region_number(),
})?;
- let client = self.catalog_manager.datanode_manager().datanode(peer).await;
+ let client = self.datanode_manager.datanode(peer).await;
client
.handle_query(request)
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 37b835c49609..9d96b333ad47 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -26,6 +26,12 @@ use crate::pubsub::Message;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
+ #[snafu(display("Failed to create default catalog and schema, source: {}", source))]
+ InitMetadata {
+ location: Location,
+ source: common_meta::error::Error,
+ },
+
#[snafu(display("Failed to allocate next sequence number: {}", source))]
NextSequence {
location: Location,
@@ -612,6 +618,8 @@ impl ErrorExt for Error {
| Error::ConvertEtcdTxnObject { source, .. }
| Error::GetFullTableInfo { source, .. } => source.status_code(),
+ Error::InitMetadata { source, .. } => source.status_code(),
+
Error::Other { source, .. } => source.status_code(),
}
}
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index 4e5b26e1f532..f2a49e7452a5 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -25,6 +25,7 @@ pub mod handler;
pub mod keys;
pub mod lease;
pub mod lock;
+
pub mod metadata_service;
pub mod metasrv;
mod metrics;
diff --git a/src/meta-srv/src/metadata_service.rs b/src/meta-srv/src/metadata_service.rs
index 09efdbe6d308..8816abb8bc65 100644
--- a/src/meta-srv/src/metadata_service.rs
+++ b/src/meta-srv/src/metadata_service.rs
@@ -18,8 +18,7 @@ use async_trait::async_trait;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::TableMetadataManagerRef;
-use common_telemetry::{info, timer};
-use metrics::increment_counter;
+use common_telemetry::info;
use snafu::{ensure, ResultExt};
use crate::error;
@@ -62,15 +61,12 @@ impl MetadataService for DefaultMetadataService {
schema_name: &str,
if_not_exist: bool,
) -> Result<()> {
- let _timer = timer!(crate::metrics::METRIC_META_CREATE_SCHEMA);
-
self.table_metadata_manager
.catalog_manager()
.create(CatalogNameKey::new(catalog_name))
.await
.context(error::TableMetadataManagerSnafu)?;
- increment_counter!(crate::metrics::METRIC_META_CREATE_CATALOG);
info!("Successfully created a catalog: {}", catalog_name);
let schema = SchemaNameKey::new(catalog_name, schema_name);
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index fb320b8afb05..727f6a0c4187 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -19,7 +19,6 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::Peer;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_grpc::channel_manager;
use common_meta::ddl::DdlTaskExecutorRef;
@@ -36,10 +35,9 @@ use tokio::sync::broadcast::error::RecvError;
use crate::cluster::MetaPeerClientRef;
use crate::election::{Election, LeaderChangeMessage};
-use crate::error::{RecoverProcedureSnafu, Result};
+use crate::error::{InitMetadataSnafu, RecoverProcedureSnafu, Result};
use crate::handler::HeartbeatHandlerGroup;
use crate::lock::DistLockRef;
-use crate::metadata_service::MetadataServiceRef;
use crate::pubsub::{PublishRef, SubscribeManagerRef};
use crate::selector::{Selector, SelectorType};
use crate::service::mailbox::MailboxRef;
@@ -196,7 +194,6 @@ pub struct MetaSrv {
election: Option<ElectionRef>,
lock: DistLockRef,
procedure_manager: ProcedureManagerRef,
- metadata_service: MetadataServiceRef,
mailbox: MailboxRef,
ddl_executor: DdlTaskExecutorRef,
table_metadata_manager: TableMetadataManagerRef,
@@ -296,9 +293,10 @@ impl MetaSrv {
}
async fn create_default_schema_if_not_exist(&self) -> Result<()> {
- self.metadata_service
- .create_schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, true)
+ self.table_metadata_manager
+ .init()
.await
+ .context(InitMetadataSnafu)
}
pub fn shutdown(&self) {
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index 1c630a30ab59..39768817c5bd 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -43,7 +43,6 @@ use crate::handler::response_header_handler::ResponseHeaderHandler;
use crate::handler::{HeartbeatHandlerGroup, HeartbeatMailbox, Pushers};
use crate::lock::memory::MemLock;
use crate::lock::DistLockRef;
-use crate::metadata_service::{DefaultMetadataService, MetadataServiceRef};
use crate::metasrv::{
ElectionRef, MetaSrv, MetaSrvOptions, MetasrvInfo, SelectorContext, SelectorRef, TABLE_ID_SEQ,
};
@@ -66,7 +65,6 @@ pub struct MetaSrvBuilder {
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClientRef>,
lock: Option<DistLockRef>,
- metadata_service: Option<MetadataServiceRef>,
datanode_clients: Option<Arc<DatanodeClients>>,
pubsub: Option<(PublishRef, SubscribeManagerRef)>,
}
@@ -82,7 +80,6 @@ impl MetaSrvBuilder {
election: None,
options: None,
lock: None,
- metadata_service: None,
datanode_clients: None,
pubsub: None,
}
@@ -128,11 +125,6 @@ impl MetaSrvBuilder {
self
}
- pub fn metadata_service(mut self, metadata_service: MetadataServiceRef) -> Self {
- self.metadata_service = Some(metadata_service);
- self
- }
-
pub fn datanode_clients(mut self, clients: Arc<DatanodeClients>) -> Self {
self.datanode_clients = Some(clients);
self
@@ -155,7 +147,6 @@ impl MetaSrvBuilder {
selector,
handler_group,
lock,
- metadata_service,
datanode_clients,
pubsub,
} = self;
@@ -174,10 +165,7 @@ impl MetaSrvBuilder {
let kv_backend = KvBackendAdapter::wrap(kv_store.clone());
let table_id_sequence = Arc::new(Sequence::new(TABLE_ID_SEQ, 1024, 10, kv_backend.clone()));
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
- let metadata_service = metadata_service
- .unwrap_or_else(|| Arc::new(DefaultMetadataService::new(table_metadata_manager)));
let lock = lock.unwrap_or_else(|| Arc::new(MemLock::default()));
- let table_metadata_manager = build_table_metadata_manager(&kv_store);
let ctx = SelectorContext {
datanode_lease_secs: options.datanode_lease_secs,
server_addr: options.server_addr.clone(),
@@ -275,7 +263,6 @@ impl MetaSrvBuilder {
election,
lock,
procedure_manager,
- metadata_service,
mailbox,
ddl_executor: ddl_manager,
table_metadata_manager,
@@ -333,12 +320,6 @@ fn build_procedure_manager(options: &MetaSrvOptions, kv_store: &KvStoreRef) -> P
Arc::new(LocalManager::new(manager_config, state_store))
}
-fn build_table_metadata_manager(kv_store: &KvStoreRef) -> TableMetadataManagerRef {
- Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
- kv_store.clone(),
- )))
-}
-
fn build_ddl_manager(
options: &MetaSrvOptions,
datanode_clients: Option<Arc<DatanodeClients>>,
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index 709e410b0b02..483fccfff8df 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub(crate) const METRIC_META_CREATE_CATALOG: &str = "meta.create_catalog";
-pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
pub(crate) const METRIC_META_KV_REQUEST: &str = "meta.kv_request";
pub(crate) const METRIC_META_ROUTE_REQUEST: &str = "meta.route_request";
pub(crate) const METRIC_META_HEARTBEAT_CONNECTION_NUM: &str = "meta.heartbeat_connection_num";
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index d7393ba04863..0ce0b4230fa8 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -20,12 +20,10 @@ use api::v1::meta::heartbeat_server::HeartbeatServer;
use api::v1::meta::router_server::RouterServer;
use api::v1::meta::store_server::StoreServer;
use client::client_manager::DatanodeClients;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::key::TableMetadataManager;
use tower::service_fn;
-use crate::metadata_service::{DefaultMetadataService, MetadataService};
use crate::metasrv::builder::MetaSrvBuilder;
use crate::metasrv::{MetaSrv, MetaSrvOptions, SelectorRef};
use crate::service::store::etcd::EtcdStore;
@@ -64,12 +62,8 @@ pub async fn mock(
let table_metadata_manager = Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
kv_store.clone(),
)));
- let metadata_service = DefaultMetadataService::new(table_metadata_manager);
- metadata_service
- .create_schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, true)
- .await
- .unwrap();
+ table_metadata_manager.init().await.unwrap();
let builder = MetaSrvBuilder::new().options(opts).kv_store(kv_store);
|
fix
|
fix start issues under standalone mode (#2352)
|
6b4be3a1cc990a1bd30f390ff9fd842a0835bc95
|
2024-02-07 15:35:04
|
Hudson C. Dalprá
|
fix(util): join_path function should not trim leading `/` (#3280)
| false
|
diff --git a/Makefile b/Makefile
index d26f3129af6f..61452f0f8746 100644
--- a/Makefile
+++ b/Makefile
@@ -65,7 +65,7 @@ endif
build: ## Build debug version greptime.
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
-.POHNY: build-by-dev-builder
+.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
@@ -144,11 +144,12 @@ multi-platform-buildx: ## Create buildx multi-platform builder.
docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
##@ Test
+.PHONY: test
test: nextest ## Run unit and integration tests.
cargo nextest run ${NEXTEST_OPTS}
-.PHONY: nextest ## Install nextest tools.
-nextest:
+.PHONY: nextest
+nextest: ## Install nextest tools.
cargo --list | grep nextest || cargo install cargo-nextest --locked
.PHONY: sqlness-test
diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs
index febc8413e281..1ad9c47d51df 100644
--- a/src/object-store/src/util.rs
+++ b/src/object-store/src/util.rs
@@ -78,7 +78,49 @@ pub fn normalize_dir(v: &str) -> String {
/// - Otherwise, it's a file path.
pub fn join_path(parent: &str, child: &str) -> String {
let output = format!("{parent}/{child}");
- opendal::raw::normalize_path(&output)
+ normalize_path(&output)
+}
+
+/// Make sure all operation are constructed by normalized path:
+///
+/// - Path endswith `/` means it's a dir path.
+/// - Otherwise, it's a file path.
+///
+/// # Normalize Rules
+///
+/// - All whitespace will be trimmed: ` abc/def ` => `abc/def`
+/// - Repeated leading / will be trimmed: `///abc` => `/abc`
+/// - Internal // will be replaced by /: `abc///def` => `abc/def`
+/// - Empty path will be `/`: `` => `/`
+pub fn normalize_path(path: &str) -> String {
+ // - all whitespace has been trimmed.
+ let path = path.trim();
+
+ // Fast line for empty path.
+ if path.is_empty() {
+ return "/".to_string();
+ }
+
+ let has_leading = path.starts_with('/');
+ let has_trailing = path.ends_with('/');
+
+ let mut p = path
+ .split('/')
+ .filter(|v| !v.is_empty())
+ .collect::<Vec<_>>()
+ .join("/");
+
+ // If path is not starting with `/` but it should
+ if !p.starts_with('/') && has_leading {
+ p.insert(0, '/');
+ }
+
+ // If path is not ending with `/` but it should
+ if !p.ends_with('/') && has_trailing {
+ p.push('/');
+ }
+
+ p
}
/// Attaches instrument layers to the object store.
@@ -127,10 +169,14 @@ mod tests {
assert_eq!("/", join_path("", "/"));
assert_eq!("/", join_path("/", "/"));
assert_eq!("a/", join_path("a", ""));
+ assert_eq!("/a", join_path("/", "a"));
assert_eq!("a/b/c.txt", join_path("a/b", "c.txt"));
- assert_eq!("a/b/c.txt", join_path("/a/b", "c.txt"));
- assert_eq!("a/b/c/", join_path("/a/b", "c/"));
- assert_eq!("a/b/c/", join_path("/a/b", "/c/"));
- assert_eq!("a/b/c.txt", join_path("/a/b", "//c.txt"));
+ assert_eq!("/a/b/c.txt", join_path("/a/b", "c.txt"));
+ assert_eq!("/a/b/c/", join_path("/a/b", "c/"));
+ assert_eq!("/a/b/c/", join_path("/a/b", "/c/"));
+ assert_eq!("/a/b/c.txt", join_path("/a/b", "//c.txt"));
+ assert_eq!("abc/def", join_path(" abc", "/def "));
+ assert_eq!("/abc", join_path("//", "/abc"));
+ assert_eq!("abc/def", join_path("abc/", "//def"));
}
}
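Note: a short usage sketch of the normalization rules documented above. It assumes the functions are importable as object_store::util::{join_path, normalize_path}, matching the file path in the diff; treat the import path as illustrative.

use object_store::util::{join_path, normalize_path};

fn main() {
    // The leading `/` is preserved now, which is the point of this fix.
    assert_eq!("/a/b/c.txt", join_path("/a/b", "c.txt"));
    // Repeated separators collapse and surrounding whitespace is trimmed.
    assert_eq!("abc/def", normalize_path(" abc///def "));
    // Empty input normalizes to the root marker.
    assert_eq!("/", normalize_path(""));
    // A trailing `/` still marks a directory path.
    assert_eq!("/a/b/c/", join_path("/a/b", "c/"));
}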
|
fix
|
join_path function should not trim leading `/` (#3280)
|
f78c467a86ced895c5c8a8a9810c925472bcdb82
|
2024-01-08 09:25:58
|
Weny Xu
|
chore: bump opendal to 0.44.1 (#3111)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 8b9c75c08773..47a65918503d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5504,9 +5504,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.44.0"
+version = "0.44.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c32736a48ef08a5d2212864e2295c8e54f4d6b352b7f49aa0c29a12fc410ff66"
+checksum = "bc0ad72f7b44ca4ae59d27ea151fdc6f37305cf6efe099bdaedbb30ec34579c0"
dependencies = [
"anyhow",
"async-compat",
|
chore
|
bump opendal to 0.44.1 (#3111)
|
e0384a7d468512e1e097cd8561c4963b389e00b3
|
2025-01-18 20:23:56
|
Yingwen
|
feat: overwrites inferred compaction window by region options (#5396)
| false
|
diff --git a/src/mito2/src/engine/compaction_test.rs b/src/mito2/src/engine/compaction_test.rs
index 49b18c0ca5a4..5e5cb0de7531 100644
--- a/src/mito2/src/engine/compaction_test.rs
+++ b/src/mito2/src/engine/compaction_test.rs
@@ -12,16 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
use std::ops::Range;
use std::sync::Arc;
+use std::time::Duration;
use api::v1::{ColumnSchema, Rows};
use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use datatypes::prelude::ScalarVector;
use datatypes::vectors::TimestampMillisecondVector;
use store_api::region_engine::{RegionEngine, RegionRole};
+use store_api::region_request::AlterKind::SetRegionOptions;
use store_api::region_request::{
- RegionCompactRequest, RegionDeleteRequest, RegionFlushRequest, RegionRequest,
+ RegionAlterRequest, RegionCompactRequest, RegionDeleteRequest, RegionFlushRequest,
+ RegionOpenRequest, RegionRequest, SetRegionOption,
};
use store_api::storage::{RegionId, ScanRequest};
use tokio::sync::Notify;
@@ -466,3 +470,219 @@ async fn test_compaction_update_time_window() {
let vec = collect_stream_ts(stream).await;
assert_eq!((0..4000).map(|v| v * 1000).collect::<Vec<_>>(), vec);
}
+
+#[tokio::test]
+async fn test_change_region_compaction_window() {
+ common_telemetry::init_default_ut_logging();
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ env.get_kv_backend(),
+ )
+ .await;
+
+ let request = CreateRequestBuilder::new()
+ .insert_option("compaction.type", "twcs")
+ .insert_option("compaction.twcs.max_active_window_runs", "1")
+ .insert_option("compaction.twcs.max_active_window_files", "1")
+ .insert_option("compaction.twcs.max_inactive_window_runs", "1")
+ .insert_option("compaction.twcs.max_inactive_window_files", "1")
+ .build();
+ let region_dir = request.region_dir.clone();
+ let column_schemas = request
+ .column_metadatas
+ .iter()
+ .map(column_metadata_to_column_schema)
+ .collect::<Vec<_>>();
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+ // Flush 2 SSTs for compaction.
+ put_and_flush(&engine, region_id, &column_schemas, 0..1200).await; // window 3600
+ put_and_flush(&engine, region_id, &column_schemas, 1200..2400).await; // window 3600
+
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Compact(RegionCompactRequest::default()),
+ )
+ .await
+ .unwrap();
+
+ // Put window 7200
+ put_and_flush(&engine, region_id, &column_schemas, 4000..5000).await; // window 3600
+
+ // Check compaction window.
+ let region = engine.get_region(region_id).unwrap();
+ {
+ let version = region.version();
+ assert_eq!(
+ Some(Duration::from_secs(3600)),
+ version.compaction_time_window,
+ );
+ assert!(version.options.compaction.time_window().is_none());
+ }
+
+ // Change compaction window.
+ let request = RegionRequest::Alter(RegionAlterRequest {
+ schema_version: region.metadata().schema_version,
+ kind: SetRegionOptions {
+ options: vec![SetRegionOption::Twsc(
+ "compaction.twcs.time_window".to_string(),
+ "2h".to_string(),
+ )],
+ },
+ });
+ engine.handle_request(region_id, request).await.unwrap();
+
+ // Compaction again. It should compacts window 3600 and 7200
+ // into 7200.
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Compact(RegionCompactRequest::default()),
+ )
+ .await
+ .unwrap();
+ // Check compaction window.
+ {
+ let region = engine.get_region(region_id).unwrap();
+ let version = region.version();
+ assert_eq!(
+ Some(Duration::from_secs(7200)),
+ version.compaction_time_window,
+ );
+ assert_eq!(
+ Some(Duration::from_secs(7200)),
+ version.options.compaction.time_window()
+ );
+ }
+
+ // Reopen region.
+ let engine = env.reopen_engine(engine, MitoConfig::default()).await;
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Open(RegionOpenRequest {
+ engine: String::new(),
+ region_dir,
+ options: Default::default(),
+ skip_wal_replay: false,
+ }),
+ )
+ .await
+ .unwrap();
+ // Check compaction window.
+ {
+ let region = engine.get_region(region_id).unwrap();
+ let version = region.version();
+ assert_eq!(
+ Some(Duration::from_secs(7200)),
+ version.compaction_time_window,
+ );
+ // We open the region without options, so the time window should be None.
+ assert!(version.options.compaction.time_window().is_none());
+ }
+}
+
+#[tokio::test]
+async fn test_open_overwrite_compaction_window() {
+ common_telemetry::init_default_ut_logging();
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+
+ env.get_schema_metadata_manager()
+ .register_region_table_info(
+ region_id.table_id(),
+ "test_table",
+ "test_catalog",
+ "test_schema",
+ None,
+ env.get_kv_backend(),
+ )
+ .await;
+
+ let request = CreateRequestBuilder::new()
+ .insert_option("compaction.type", "twcs")
+ .insert_option("compaction.twcs.max_active_window_runs", "1")
+ .insert_option("compaction.twcs.max_active_window_files", "1")
+ .insert_option("compaction.twcs.max_inactive_window_runs", "1")
+ .insert_option("compaction.twcs.max_inactive_window_files", "1")
+ .build();
+ let region_dir = request.region_dir.clone();
+ let column_schemas = request
+ .column_metadatas
+ .iter()
+ .map(column_metadata_to_column_schema)
+ .collect::<Vec<_>>();
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+ // Flush 2 SSTs for compaction.
+ put_and_flush(&engine, region_id, &column_schemas, 0..1200).await; // window 3600
+ put_and_flush(&engine, region_id, &column_schemas, 1200..2400).await; // window 3600
+
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Compact(RegionCompactRequest::default()),
+ )
+ .await
+ .unwrap();
+
+ // Check compaction window.
+ {
+ let region = engine.get_region(region_id).unwrap();
+ let version = region.version();
+ assert_eq!(
+ Some(Duration::from_secs(3600)),
+ version.compaction_time_window,
+ );
+ assert!(version.options.compaction.time_window().is_none());
+ }
+
+ // Reopen region.
+ let options = HashMap::from([
+ ("compaction.type".to_string(), "twcs".to_string()),
+ ("compaction.twcs.time_window".to_string(), "2h".to_string()),
+ ]);
+ let engine = env.reopen_engine(engine, MitoConfig::default()).await;
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Open(RegionOpenRequest {
+ engine: String::new(),
+ region_dir,
+ options,
+ skip_wal_replay: false,
+ }),
+ )
+ .await
+ .unwrap();
+ // Check compaction window.
+ {
+ let region = engine.get_region(region_id).unwrap();
+ let version = region.version();
+ assert_eq!(
+ Some(Duration::from_secs(7200)),
+ version.compaction_time_window,
+ );
+ assert_eq!(
+ Some(Duration::from_secs(7200)),
+ version.options.compaction.time_window()
+ );
+ }
+}
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index 188c314837c0..cc809f61a782 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -26,6 +26,7 @@
use std::sync::{Arc, RwLock};
use std::time::Duration;
+use common_telemetry::info;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::SequenceNumber;
@@ -253,7 +254,10 @@ pub(crate) struct Version {
///
/// Used to check if it is a flush task during the truncating table.
pub(crate) truncated_entry_id: Option<EntryId>,
- /// Inferred compaction time window.
+ /// Inferred compaction time window from flush.
+ ///
+ /// If compaction options contain a time window, it will overwrite this value
+ /// when creating a new version from the [VersionBuilder].
pub(crate) compaction_time_window: Option<Duration>,
/// Options of the region.
pub(crate) options: RegionOptions,
@@ -389,7 +393,24 @@ impl VersionBuilder {
}
/// Builds a new [Version] from the builder.
+ /// It overwrites the window size by compaction option.
pub(crate) fn build(self) -> Version {
+ let compaction_time_window = self
+ .options
+ .compaction
+ .time_window()
+ .or(self.compaction_time_window);
+ if self.compaction_time_window.is_some()
+ && compaction_time_window != self.compaction_time_window
+ {
+ info!(
+ "VersionBuilder overwrites region compaction time window from {:?} to {:?}, region: {}",
+ self.compaction_time_window,
+ compaction_time_window,
+ self.metadata.region_id
+ );
+ }
+
Version {
metadata: self.metadata,
memtables: self.memtables,
@@ -397,7 +418,7 @@ impl VersionBuilder {
flushed_entry_id: self.flushed_entry_id,
flushed_sequence: self.flushed_sequence,
truncated_entry_id: self.truncated_entry_id,
- compaction_time_window: self.compaction_time_window,
+ compaction_time_window,
options: self.options,
}
}
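Note: VersionBuilder::build now resolves the effective window with a simple precedence rule: an explicit compaction.twcs.time_window region option wins, otherwise the window inferred from flushed data is kept. A minimal sketch of just that rule follows; the function name is illustrative.

use std::time::Duration;

fn effective_window(
    option_window: Option<Duration>,
    inferred_window: Option<Duration>,
) -> Option<Duration> {
    // `or` keeps the explicit option when present and falls back to the inferred value.
    option_window.or(inferred_window)
}

fn main() {
    let inferred = Some(Duration::from_secs(3600));
    // Without an explicit option, the inferred window is used.
    assert_eq!(effective_window(None, inferred), inferred);
    // Setting `time_window = "2h"` overwrites the inferred 1h window.
    let explicit = Some(Duration::from_secs(7200));
    assert_eq!(effective_window(explicit, inferred), explicit);
}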
|
feat
|
overwrites inferred compaction window by region options (#5396)
|
466f7c644885853876a16f6f5faf045eaae9c29a
|
2024-05-24 12:00:50
|
Weny Xu
|
feat: add `RawEntryReader` and `OneshotWalEntryReader` trait (#4027)
| false
|
diff --git a/src/log-store/src/kafka.rs b/src/log-store/src/kafka.rs
index 07a21596cbf6..dc068f3b4b52 100644
--- a/src/log-store/src/kafka.rs
+++ b/src/log-store/src/kafka.rs
@@ -20,10 +20,9 @@ pub(crate) mod util;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
-use store_api::logstore::entry::{Entry, Id as EntryId};
+use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
use store_api::logstore::namespace::Namespace;
-
-use crate::error::Error;
+use store_api::storage::RegionId;
/// Kafka Namespace implementation.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
@@ -56,7 +55,13 @@ pub struct EntryImpl {
}
impl Entry for EntryImpl {
- type Error = Error;
+ fn into_raw_entry(self) -> RawEntry {
+ RawEntry {
+ region_id: self.region_id(),
+ entry_id: self.id(),
+ data: self.data,
+ }
+ }
fn data(&self) -> &[u8] {
&self.data
@@ -66,6 +71,10 @@ impl Entry for EntryImpl {
self.id
}
+ fn region_id(&self) -> RegionId {
+ RegionId::from_u64(self.ns.region_id)
+ }
+
fn estimated_size(&self) -> usize {
size_of::<Self>() + self.data.capacity() * size_of::<u8>() + self.ns.topic.capacity()
}
diff --git a/src/log-store/src/noop.rs b/src/log-store/src/noop.rs
index ded005ec7981..e5ed7fd66bd2 100644
--- a/src/log-store/src/noop.rs
+++ b/src/log-store/src/noop.rs
@@ -13,9 +13,10 @@
// limitations under the License.
use common_wal::options::WalOptions;
-use store_api::logstore::entry::{Entry, Id as EntryId};
+use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
use store_api::logstore::namespace::{Id as NamespaceId, Namespace};
use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
+use store_api::storage::RegionId;
use crate::error::{Error, Result};
@@ -36,7 +37,13 @@ impl Namespace for NamespaceImpl {
}
impl Entry for EntryImpl {
- type Error = Error;
+ fn into_raw_entry(self) -> RawEntry {
+ RawEntry {
+ region_id: self.region_id(),
+ entry_id: self.id(),
+ data: vec![],
+ }
+ }
fn data(&self) -> &[u8] {
&[]
@@ -46,6 +53,10 @@ impl Entry for EntryImpl {
0
}
+ fn region_id(&self) -> RegionId {
+ RegionId::from_u64(0)
+ }
+
fn estimated_size(&self) -> usize {
0
}
diff --git a/src/log-store/src/raft_engine.rs b/src/log-store/src/raft_engine.rs
index e7a6f6b0ca16..cdb600249caa 100644
--- a/src/log-store/src/raft_engine.rs
+++ b/src/log-store/src/raft_engine.rs
@@ -15,10 +15,10 @@
use std::hash::{Hash, Hasher};
use std::mem::size_of;
-use store_api::logstore::entry::{Entry, Id as EntryId};
+use store_api::logstore::entry::{Entry, Id as EntryId, RawEntry};
use store_api::logstore::namespace::{Id as NamespaceId, Namespace};
+use store_api::storage::RegionId;
-use crate::error::Error;
use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl};
mod backend;
@@ -67,7 +67,13 @@ impl Namespace for NamespaceImpl {
}
impl Entry for EntryImpl {
- type Error = Error;
+ fn into_raw_entry(self) -> RawEntry {
+ RawEntry {
+ region_id: self.region_id(),
+ entry_id: self.id(),
+ data: self.data,
+ }
+ }
fn data(&self) -> &[u8] {
self.data.as_slice()
@@ -77,6 +83,10 @@ impl Entry for EntryImpl {
self.id
}
+ fn region_id(&self) -> RegionId {
+ RegionId::from_u64(self.id)
+ }
+
fn estimated_size(&self) -> usize {
self.data.len() + size_of::<u64>() + size_of::<u64>()
}
diff --git a/src/mito2/src/wal.rs b/src/mito2/src/wal.rs
index 06e38f36f98f..a36493f300fb 100644
--- a/src/mito2/src/wal.rs
+++ b/src/mito2/src/wal.rs
@@ -14,6 +14,13 @@
//! Write ahead log of the engine.
+/// TODO(weny): remove it
+#[allow(unused)]
+pub(crate) mod raw_entry_reader;
+/// TODO(weny): remove it
+#[allow(unused)]
+pub(crate) mod wal_entry_reader;
+
use std::collections::HashMap;
use std::mem;
use std::sync::Arc;
diff --git a/src/mito2/src/wal/raw_entry_reader.rs b/src/mito2/src/wal/raw_entry_reader.rs
new file mode 100644
index 000000000000..aa4d5ea0e455
--- /dev/null
+++ b/src/mito2/src/wal/raw_entry_reader.rs
@@ -0,0 +1,44 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use futures::stream::BoxStream;
+use store_api::logstore::entry::RawEntry;
+use store_api::storage::RegionId;
+
+use crate::error::Result;
+use crate::wal::EntryId;
+
+/// A stream that yields [RawEntry].
+pub type RawEntryStream<'a> = BoxStream<'a, Result<RawEntry>>;
+
+// The namespace of kafka log store
+pub struct KafkaNamespace<'a> {
+ topic: &'a str,
+}
+
+// The namespace of raft engine log store
+pub struct RaftEngineNamespace {
+ region_id: RegionId,
+}
+
+/// The namespace of [RawEntryReader].
+pub(crate) enum LogStoreNamespace<'a> {
+ RaftEngine(RaftEngineNamespace),
+ Kafka(KafkaNamespace<'a>),
+}
+
+/// [RawEntryReader] provides the ability to read [RawEntry] from the underlying [LogStore].
+pub(crate) trait RawEntryReader: Send + Sync {
+ fn read(&self, ctx: LogStoreNamespace, start_id: EntryId) -> Result<RawEntryStream<'static>>;
+}
diff --git a/src/mito2/src/wal/wal_entry_reader.rs b/src/mito2/src/wal/wal_entry_reader.rs
new file mode 100644
index 000000000000..8c3e16122254
--- /dev/null
+++ b/src/mito2/src/wal/wal_entry_reader.rs
@@ -0,0 +1,24 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use store_api::storage::RegionId;
+
+use crate::error::Result;
+use crate::wal::raw_entry_reader::LogStoreNamespace;
+use crate::wal::{EntryId, WalEntryStream};
+
+/// [OneshotWalEntryReader] provides the ability to read and decode entries from the underlying store.
+pub(crate) trait OneshotWalEntryReader: Send + Sync {
+ fn read(self, ctx: LogStoreNamespace, start_id: EntryId) -> Result<WalEntryStream>;
+}
diff --git a/src/store-api/src/logstore/entry.rs b/src/store-api/src/logstore/entry.rs
index daac2df4c9af..50e58a38fe43 100644
--- a/src/store-api/src/logstore/entry.rs
+++ b/src/store-api/src/logstore/entry.rs
@@ -12,16 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_error::ext::ErrorExt;
+use crate::storage::RegionId;
/// An entry's id.
/// Different log store implementations may interpret the id to different meanings.
pub type Id = u64;
+/// The raw Wal entry.
+pub struct RawEntry {
+ pub region_id: RegionId,
+ pub entry_id: Id,
+ pub data: Vec<u8>,
+}
+
/// Entry is the minimal data storage unit through which users interact with the log store.
/// The log store implementation may have larger or smaller data storage unit than an entry.
pub trait Entry: Send + Sync {
- type Error: ErrorExt + Send + Sync;
+ /// Consumes [Entry] and converts to [RawEntry].
+ fn into_raw_entry(self) -> RawEntry;
/// Returns the contained data of the entry.
fn data(&self) -> &[u8];
@@ -30,6 +38,9 @@ pub trait Entry: Send + Sync {
/// Usually the namespace id is identical with the region id.
fn id(&self) -> Id;
+ /// Returns the [RegionId]
+ fn region_id(&self) -> RegionId;
+
/// Computes the estimated encoded size.
fn estimated_size(&self) -> usize;
}
diff --git a/src/store-api/src/logstore/entry_stream.rs b/src/store-api/src/logstore/entry_stream.rs
index 5f26133ada8a..6a5886b0b53f 100644
--- a/src/store-api/src/logstore/entry_stream.rs
+++ b/src/store-api/src/logstore/entry_stream.rs
@@ -39,6 +39,8 @@ mod tests {
use super::*;
pub use crate::logstore::entry::Id;
+ use crate::logstore::entry::RawEntry;
+ use crate::storage::RegionId;
pub struct SimpleEntry {
/// Binary data of current entry
@@ -64,7 +66,13 @@ mod tests {
}
impl Entry for SimpleEntry {
- type Error = Error;
+ fn into_raw_entry(self) -> RawEntry {
+ RawEntry {
+ region_id: RegionId::from_u64(0),
+ entry_id: 0,
+ data: vec![],
+ }
+ }
fn data(&self) -> &[u8] {
&self.data
@@ -74,6 +82,10 @@ mod tests {
0u64
}
+ fn region_id(&self) -> RegionId {
+ RegionId::from_u64(0)
+ }
+
fn estimated_size(&self) -> usize {
self.data.len()
}
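Note: the trait change above drops the associated Error type and adds region_id()/into_raw_entry(), so any log-store entry can be converted into a store-agnostic RawEntry. The sketch below mirrors that shape with a toy entry type; RegionId and EntryId are simplified to u64 and the struct layout is illustrative.

type RegionId = u64;
type EntryId = u64;

struct RawEntry {
    region_id: RegionId,
    entry_id: EntryId,
    data: Vec<u8>,
}

trait Entry: Send + Sync {
    /// Consumes the entry and converts it into a store-agnostic raw entry.
    fn into_raw_entry(self) -> RawEntry;
    fn data(&self) -> &[u8];
    fn id(&self) -> EntryId;
    fn region_id(&self) -> RegionId;
}

struct MemEntry {
    region_id: RegionId,
    id: EntryId,
    data: Vec<u8>,
}

impl Entry for MemEntry {
    fn into_raw_entry(self) -> RawEntry {
        RawEntry { region_id: self.region_id, entry_id: self.id, data: self.data }
    }
    fn data(&self) -> &[u8] { &self.data }
    fn id(&self) -> EntryId { self.id }
    fn region_id(&self) -> RegionId { self.region_id }
}

fn main() {
    let entry = MemEntry { region_id: 42, id: 7, data: b"mutation".to_vec() };
    let raw = entry.into_raw_entry();
    assert_eq!((raw.region_id, raw.entry_id), (42, 7));
}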
|
feat
|
add `RawEntryReader` and `OneshotWalEntryReader` trait (#4027)
|
cdbdb04d933c44e42a4a6bd44dbed5f0e35b1141
|
2024-04-16 12:05:55
|
tison
|
refactor: remove redundant try_flush invocations (#3706)
| false
|
diff --git a/src/common/datasource/src/buffered_writer.rs b/src/common/datasource/src/buffered_writer.rs
index 852486ef9f5d..8ce34070ccb8 100644
--- a/src/common/datasource/src/buffered_writer.rs
+++ b/src/common/datasource/src/buffered_writer.rs
@@ -60,12 +60,6 @@ impl<
.context(error::BufferedWriterClosedSnafu)?;
let metadata = encoder.close().await?;
- // Use `rows_written` to keep a track of if any rows have been written.
- // If no row's been written, then we can simply close the underlying
- // writer without flush so that no file will be actually created.
- if self.rows_written != 0 {
- self.bytes_written += self.try_flush(true).await?;
- }
// It's important to shut down! flushes all pending writes
self.close_inner_writer().await?;
Ok((metadata, self.bytes_written))
@@ -79,8 +73,15 @@ impl<
Fut: Future<Output = Result<T>>,
> LazyBufferedWriter<T, U, F>
{
- /// Closes the writer without flushing the buffer data.
+ /// Closes the writer and flushes the buffer data.
pub async fn close_inner_writer(&mut self) -> Result<()> {
+ // Use `rows_written` to keep a track of if any rows have been written.
+ // If no row's been written, then we can simply close the underlying
+ // writer without flush so that no file will be actually created.
+ if self.rows_written != 0 {
+ self.bytes_written += self.try_flush(true).await?;
+ }
+
if let Some(writer) = &mut self.writer {
writer.shutdown().await.context(error::AsyncWriteSnafu)?;
}
@@ -117,7 +118,7 @@ impl<
Ok(())
}
- pub async fn try_flush(&mut self, all: bool) -> Result<u64> {
+ async fn try_flush(&mut self, all: bool) -> Result<u64> {
let mut bytes_written: u64 = 0;
// Once buffered data size reaches threshold, split the data in chunks (typically 4MB)
diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs
index 8774c946d5ae..6f80590d26e1 100644
--- a/src/common/datasource/src/file_format.rs
+++ b/src/common/datasource/src/file_format.rs
@@ -213,10 +213,6 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
writer.write(&batch).await?;
rows += batch.num_rows();
}
-
- // Flushes all pending writes
- let _ = writer.try_flush(true).await?;
writer.close_inner_writer().await?;
-
Ok(rows)
}
diff --git a/src/common/datasource/src/file_format/parquet.rs b/src/common/datasource/src/file_format/parquet.rs
index c21bead9f71d..651d5904c874 100644
--- a/src/common/datasource/src/file_format/parquet.rs
+++ b/src/common/datasource/src/file_format/parquet.rs
@@ -215,10 +215,7 @@ impl BufferedWriter {
/// Write a record batch to stream writer.
pub async fn write(&mut self, arrow_batch: &RecordBatch) -> error::Result<()> {
- self.inner.write(arrow_batch).await?;
- self.inner.try_flush(false).await?;
-
- Ok(())
+ self.inner.write(arrow_batch).await
}
/// Close parquet writer.
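Note: the refactor moves the final try_flush into close_inner_writer, so callers only write() and then close(). A simplified stand-in for that contract, including the no-rows-no-file check, is shown below; the buffer type, threshold, and field names are illustrative, not the real LazyBufferedWriter.

struct BufferedWriter {
    buffer: Vec<u8>,
    threshold: usize,
    rows_written: usize,
    bytes_written: u64,
}

impl BufferedWriter {
    fn write(&mut self, row: &[u8]) {
        self.buffer.extend_from_slice(row);
        self.rows_written += 1;
        // Large buffers may still be flushed eagerly inside `write`, but callers
        // no longer need to call a flush method themselves.
        if self.buffer.len() >= self.threshold {
            self.flush();
        }
    }

    fn close(&mut self) {
        // Skip the final flush when nothing was written, so no empty file is created.
        if self.rows_written != 0 {
            self.flush();
        }
    }

    fn flush(&mut self) {
        self.bytes_written += self.buffer.len() as u64;
        self.buffer.clear();
    }
}

fn main() {
    let mut w = BufferedWriter { buffer: Vec::new(), threshold: 4, rows_written: 0, bytes_written: 0 };
    w.write(b"ab");
    // Closing is responsible for the final flush.
    w.close();
    assert_eq!(w.bytes_written, 2);
}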
|
refactor
|
remove redundant try_flush invocations (#3706)
|
1f0fc402871aa642243590c302cfc3417dc19210
|
2024-03-21 17:53:52
|
Lei, HUANG
|
fix: performance degradation caused by config change (#3556)
| false
|
diff --git a/src/mito2/src/memtable/partition_tree.rs b/src/mito2/src/memtable/partition_tree.rs
index a6a5f9dd4475..fccbf8387c4a 100644
--- a/src/mito2/src/memtable/partition_tree.rs
+++ b/src/mito2/src/memtable/partition_tree.rs
@@ -45,7 +45,7 @@ use crate::memtable::{
};
/// Use `1/DICTIONARY_SIZE_FACTOR` of OS memory as dictionary size.
-const DICTIONARY_SIZE_FACTOR: u64 = 8;
+pub(crate) const DICTIONARY_SIZE_FACTOR: u64 = 8;
pub(crate) const DEFAULT_MAX_KEYS_PER_SHARD: usize = 8192;
pub(crate) const DEFAULT_FREEZE_THRESHOLD: usize = 131072;
@@ -84,7 +84,7 @@ pub struct PartitionTreeConfig {
impl Default for PartitionTreeConfig {
fn default() -> Self {
- let mut fork_dictionary_bytes = ReadableSize::gb(1);
+ let mut fork_dictionary_bytes = ReadableSize::mb(512);
if let Some(sys_memory) = common_config::utils::get_sys_total_memory() {
let adjust_dictionary_bytes =
std::cmp::min(sys_memory / DICTIONARY_SIZE_FACTOR, fork_dictionary_bytes);
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index f595b1f15bc7..e890207e874b 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -248,10 +248,20 @@ pub struct PartitionTreeOptions {
impl Default for PartitionTreeOptions {
fn default() -> Self {
+ let mut fork_dictionary_bytes = ReadableSize::mb(512);
+ if let Some(sys_memory) = common_config::utils::get_sys_total_memory() {
+ let adjust_dictionary_bytes = std::cmp::min(
+ sys_memory / crate::memtable::partition_tree::DICTIONARY_SIZE_FACTOR,
+ fork_dictionary_bytes,
+ );
+ if adjust_dictionary_bytes.0 > 0 {
+ fork_dictionary_bytes = adjust_dictionary_bytes;
+ }
+ }
Self {
index_max_keys_per_shard: DEFAULT_MAX_KEYS_PER_SHARD,
data_freeze_threshold: DEFAULT_FREEZE_THRESHOLD,
- fork_dictionary_bytes: ReadableSize::mb(64),
+ fork_dictionary_bytes,
}
}
}
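Note: both defaults now size fork_dictionary_bytes the same way: start from 512 MiB and cap it at 1/8 of total system memory when that value is known. A small sketch of the computation with a fake memory probe (the real code queries common_config::utils::get_sys_total_memory):

const DICTIONARY_SIZE_FACTOR: u64 = 8;

fn fork_dictionary_bytes(sys_total_memory: Option<u64>) -> u64 {
    let mut bytes = 512 * 1024 * 1024; // 512 MiB default
    if let Some(sys_memory) = sys_total_memory {
        let adjusted = std::cmp::min(sys_memory / DICTIONARY_SIZE_FACTOR, bytes);
        if adjusted > 0 {
            bytes = adjusted;
        }
    }
    bytes
}

fn main() {
    // 2 GiB of RAM caps the dictionary at 256 MiB.
    assert_eq!(fork_dictionary_bytes(Some(2 * 1024 * 1024 * 1024)), 256 * 1024 * 1024);
    // Unknown memory keeps the 512 MiB default.
    assert_eq!(fork_dictionary_bytes(None), 512 * 1024 * 1024);
}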
|
fix
|
performance degradation caused by config change (#3556)
|
3201aea3602b68bbd6cbdbb6d05f620517d687bc
|
2024-01-26 18:17:24
|
JeremyHi
|
feat: create tables in batch on prom write (#3246)
| false
|
diff --git a/src/common/meta/src/ddl/create_logical_tables.rs b/src/common/meta/src/ddl/create_logical_tables.rs
index 978de69c1ba9..6c7c5d8fcf5d 100644
--- a/src/common/meta/src/ddl/create_logical_tables.rs
+++ b/src/common/meta/src/ddl/create_logical_tables.rs
@@ -162,9 +162,11 @@ impl CreateLogicalTablesProcedure {
manager.create_logic_tables_metadata(tables_data).await?;
}
- info!("Created {num_tables} tables metadata for physical table {physical_table_id}");
+ let table_ids = self.creator.data.real_table_ids();
- Ok(Status::done_with_output(self.creator.data.real_table_ids()))
+ info!("Created {num_tables} tables {table_ids:?} metadata for physical table {physical_table_id}");
+
+ Ok(Status::done_with_output(table_ids))
}
fn create_region_request_builder(
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index f4c455a122d8..a9a455f878ba 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -203,7 +203,7 @@ impl TryFrom<PbSubmitDdlTaskResponse> for SubmitDdlTaskResponse {
fn try_from(resp: PbSubmitDdlTaskResponse) -> Result<Self> {
let table_id = resp.table_id.map(|t| t.id);
- let table_ids = resp.table_ids.iter().map(|t| t.id).collect();
+ let table_ids = resp.table_ids.into_iter().map(|t| t.id).collect();
Ok(Self {
key: resp.key,
table_id,
@@ -219,6 +219,11 @@ impl From<SubmitDdlTaskResponse> for PbSubmitDdlTaskResponse {
table_id: val
.table_id
.map(|table_id| api::v1::meta::TableId { id: table_id }),
+ table_ids: val
+ .table_ids
+ .into_iter()
+ .map(|id| api::v1::meta::TableId { id })
+ .collect(),
..Default::default()
}
}
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index bffff45806c3..07523c348215 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -252,20 +252,36 @@ impl Inserter {
on_physical_table: Option<String>,
statement_executor: &StatementExecutor,
) -> Result<()> {
- // TODO(jeremy): create and alter in batch? (from `handle_metric_row_inserts`)
+ let mut create_tables = vec![];
for req in &requests.inserts {
let catalog = ctx.current_catalog();
let schema = ctx.current_schema();
let table = self.get_table(catalog, schema, &req.table_name).await?;
match table {
Some(table) => {
+ // TODO(jeremy): alter in batch? (from `handle_metric_row_inserts`)
validate_request_with_table(req, &table)?;
self.alter_table_on_demand(req, table, ctx, statement_executor)
.await?
}
None => {
- self.create_table(req, ctx, &on_physical_table, statement_executor)
- .await?
+ create_tables.push(req);
+ }
+ }
+ }
+ if !create_tables.is_empty() {
+ if let Some(on_physical_table) = on_physical_table {
+ // Creates logical tables in batch.
+ self.create_logical_tables(
+ create_tables,
+ ctx,
+ &on_physical_table,
+ statement_executor,
+ )
+ .await?;
+ } else {
+ for req in create_tables {
+ self.create_table(req, ctx, statement_executor).await?;
}
}
}
@@ -403,7 +419,6 @@ impl Inserter {
&self,
req: &RowInsertRequest,
ctx: &QueryContextRef,
- on_physical_table: &Option<String>,
statement_executor: &StatementExecutor,
) -> Result<()> {
let table_ref =
@@ -412,15 +427,7 @@ impl Inserter {
let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
let create_table_expr = &mut build_create_table_expr(&table_ref, request_schema)?;
- if let Some(physical_table) = on_physical_table {
- create_table_expr.engine = METRIC_ENGINE_NAME.to_string();
- create_table_expr.table_options.insert(
- LOGICAL_TABLE_METADATA_KEY.to_string(),
- physical_table.clone(),
- );
- }
-
- info!("Table `{table_ref}` does not exist, try creating table",);
+ info!("Table `{table_ref}` does not exist, try creating table");
// TODO(weny): multiple regions table.
let res = statement_executor
@@ -444,6 +451,65 @@ impl Inserter {
}
}
}
+
+ async fn create_logical_tables(
+ &self,
+ create_tables: Vec<&RowInsertRequest>,
+ ctx: &QueryContextRef,
+ physical_table: &str,
+ statement_executor: &StatementExecutor,
+ ) -> Result<()> {
+ let create_table_exprs = create_tables
+ .iter()
+ .map(|req| {
+ let table_ref = TableReference::full(
+ ctx.current_catalog(),
+ ctx.current_schema(),
+ &req.table_name,
+ );
+
+ info!("Logical table `{table_ref}` does not exist, try creating table");
+
+ let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
+ let mut create_table_expr = build_create_table_expr(&table_ref, request_schema)?;
+
+ create_table_expr.engine = METRIC_ENGINE_NAME.to_string();
+ create_table_expr.table_options.insert(
+ LOGICAL_TABLE_METADATA_KEY.to_string(),
+ physical_table.to_string(),
+ );
+
+ Ok(create_table_expr)
+ })
+ .collect::<Result<Vec<_>>>()?;
+
+ let res = statement_executor
+ .create_logical_tables(&create_table_exprs)
+ .await;
+
+ match res {
+ Ok(_) => {
+ info!("Successfully created logical tables");
+ Ok(())
+ }
+ Err(err) => {
+ let failed_tables = create_table_exprs
+ .into_iter()
+ .map(|expr| {
+ format!(
+ "{}.{}.{}",
+ expr.catalog_name, expr.schema_name, expr.table_name
+ )
+ })
+ .collect::<Vec<_>>();
+ error!(
+ "Failed to create logical tables {:?}: {}",
+ failed_tables, err
+ );
+ Err(err)
+ }
+ }
+ }
}
fn validate_column_count_match(requests: &RowInsertRequests) -> Result<()> {
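Note: the insert path above now partitions incoming row-insert requests into tables that already exist (validated and altered one by one) and tables that are missing (created through a single batched create_logical_tables call). A toy sketch of just that partition step; the set-based catalog and table names are illustrative only.

use std::collections::HashSet;

fn split_requests<'a>(
    existing: &HashSet<&str>,
    requests: &'a [&'a str],
) -> (Vec<&'a str>, Vec<&'a str>) {
    let mut present = Vec::new();
    let mut to_create = Vec::new();
    for &table in requests {
        if existing.contains(table) {
            present.push(table);
        } else {
            to_create.push(table);
        }
    }
    (present, to_create)
}

fn main() {
    let existing: HashSet<&str> = ["cpu_usage"].into_iter().collect();
    let requests = ["cpu_usage", "mem_usage", "disk_io"];
    let (present, to_create) = split_requests(&existing, &requests);
    assert_eq!(present, ["cpu_usage"]);
    // These two would be sent to the batched create call in one request.
    assert_eq!(to_create, ["mem_usage", "disk_io"]);
}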
|
feat
|
create tables in batch on prom write (#3246)
|
9c76d2cf541effc076926ff6c8d3e082c1cf180b
|
2023-09-13 08:44:42
|
Ruihang Xia
|
feat: convert sql number to values with target type (#2370)
| false
|
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index c251c26dfdfa..1510335c77cd 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -37,7 +37,7 @@ use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
use datatypes::types::TimestampType;
-use datatypes::value::Value;
+use datatypes::value::{OrderedF32, OrderedF64, Value};
use snafu::{ensure, OptionExt, ResultExt};
use crate::ast::{
@@ -128,12 +128,12 @@ fn parse_hex_string(s: &str) -> Result<Value> {
}
macro_rules! parse_number_to_value {
- ($data_type: expr, $n: ident, $(($Type: ident, $PrimitiveType: ident)), +) => {
+ ($data_type: expr, $n: ident, $(($Type: ident, $PrimitiveType: ident, $Target: ident)), +) => {
match $data_type {
$(
ConcreteDataType::$Type(_) => {
let n = parse_sql_number::<$PrimitiveType>($n)?;
- Ok(Value::from(n))
+ Ok(Value::$Type($Target::from(n)))
},
)+
_ => ParseSqlValueSnafu {
@@ -149,17 +149,17 @@ pub fn sql_number_to_value(data_type: &ConcreteDataType, n: &str) -> Result<Valu
parse_number_to_value!(
data_type,
n,
- (UInt8, u8),
- (UInt16, u16),
- (UInt32, u32),
- (UInt64, u64),
- (Int8, i8),
- (Int16, i16),
- (Int32, i32),
- (Int64, i64),
- (Float64, f64),
- (Float32, f32),
- (Timestamp, i64)
+ (UInt8, u8, u8),
+ (UInt16, u16, u16),
+ (UInt32, u32, u32),
+ (UInt64, u64, u64),
+ (Int8, i8, i8),
+ (Int16, i16, i16),
+ (Int32, i32, i32),
+ (Int64, i64, i64),
+ (Float64, f64, OrderedF64),
+ (Float32, f32, OrderedF32),
+ (Timestamp, i64, Timestamp)
)
// TODO(hl): also Date/DateTime
}
|
feat
|
convert sql number to values with target type (#2370)
|
4f29e50ef3b38d75ad1f329a1e0627bf5427db9b
|
2025-01-15 17:41:24
|
Ruihang Xia
|
feat: refine log query AST (#5316)
| false
|
diff --git a/src/log-query/src/log_query.rs b/src/log-query/src/log_query.rs
index a79efc78eeca..988c9c27a9b4 100644
--- a/src/log-query/src/log_query.rs
+++ b/src/log-query/src/log_query.rs
@@ -24,16 +24,73 @@ use crate::error::{
/// GreptimeDB's log query request.
#[derive(Debug, Serialize, Deserialize)]
pub struct LogQuery {
+ // Global query parameters
/// A fully qualified table name to query logs from.
pub table: TableName,
/// Specifies the time range for the log query. See [`TimeFilter`] for more details.
pub time_filter: TimeFilter,
- /// Columns with filters to query.
- pub columns: Vec<ColumnFilters>,
- /// Controls row skipping and fetch count for logs.
+ /// Controls row skipping and fetch on the result set.
pub limit: Limit,
- /// Adjacent lines to return.
+ /// Columns to return in the result set.
+ ///
+ /// The columns can be either from the original log or derived from processing exprs.
+ /// Default (empty) means all columns.
+ ///
+ /// TODO(ruihang): Do we need negative select?
+ pub columns: Vec<String>,
+
+ // Filters
+ /// Conjunction of filters to apply for the raw logs.
+ ///
+ /// Filters here can only refer to the columns from the original log.
+ pub filters: Vec<ColumnFilters>,
+ /// Adjacent lines to return. Applies to all filters above.
+ ///
+ /// TODO(ruihang): Do we need per-filter context?
pub context: Context,
+
+ // Processors
+ /// Expressions to calculate after filter.
+ pub exprs: Vec<LogExpr>,
+}
+
+/// Expression to calculate on log after filtering.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum LogExpr {
+ NamedIdent(String),
+ PositionalIdent(usize),
+ Literal(String),
+ ScalarFunc {
+ name: String,
+ args: Vec<LogExpr>,
+ },
+ AggrFunc {
+ name: String,
+ args: Vec<LogExpr>,
+ /// Optional range function parameter. Stands for the time range for both step and align.
+ range: Option<String>,
+ by: Vec<LogExpr>,
+ },
+ Decompose {
+ expr: Box<LogExpr>,
+ /// JSON, CSV, etc.
+ schema: String,
+ /// Fields with type name to extract from the decomposed value.
+ fields: Vec<(String, String)>,
+ },
+ BinaryOp {
+ left: Box<LogExpr>,
+ op: String,
+ right: Box<LogExpr>,
+ },
+ Alias {
+ expr: Box<LogExpr>,
+ alias: String,
+ },
+ Filter {
+ expr: Box<LogExpr>,
+ filter: ContentFilter,
+ },
}
impl Default for LogQuery {
@@ -41,9 +98,11 @@ impl Default for LogQuery {
Self {
table: TableName::new("", "", ""),
time_filter: Default::default(),
- columns: vec![],
+ filters: vec![],
limit: Limit::default(),
context: Default::default(),
+ columns: vec![],
+ exprs: vec![],
}
}
}
@@ -232,6 +291,7 @@ pub struct ColumnFilters {
#[derive(Debug, Serialize, Deserialize)]
pub enum ContentFilter {
+ // Search-based filters
/// Only match the exact content.
///
/// For example, if the content is "pale blue dot", the filter "pale" or "pale blue" will match.
@@ -246,6 +306,14 @@ pub enum ContentFilter {
Contains(String),
/// Match the content with a regex pattern. The pattern should be a valid Rust regex.
Regex(String),
+
+ // Value-based filters
+ /// Content exists, a.k.a. not null.
+ Exist,
+ Between(String, String),
+ // TODO(ruihang): arithmetic operations
+
+ // Compound filters
Compound(Vec<ContentFilter>, BinaryOperator),
}
diff --git a/src/query/src/log_query/planner.rs b/src/query/src/log_query/planner.rs
index e19356e44400..79474fab53cb 100644
--- a/src/query/src/log_query/planner.rs
+++ b/src/query/src/log_query/planner.rs
@@ -69,13 +69,11 @@ impl LogQueryPlanner {
// Time filter
filters.push(self.build_time_filter(&query.time_filter, &schema)?);
- // Column filters and projections
- let mut projected_columns = Vec::new();
- for column_filter in &query.columns {
+ // Column filters
+ for column_filter in &query.filters {
if let Some(expr) = self.build_column_filter(column_filter)? {
filters.push(expr);
}
- projected_columns.push(col(&column_filter.column_name));
}
// Apply filters
@@ -87,9 +85,12 @@ impl LogQueryPlanner {
}
// Apply projections
- plan_builder = plan_builder
- .project(projected_columns)
- .context(DataFusionPlanningSnafu)?;
+ if !query.columns.is_empty() {
+ let projected_columns = query.columns.iter().map(col).collect::<Vec<_>>();
+ plan_builder = plan_builder
+ .project(projected_columns)
+ .context(DataFusionPlanningSnafu)?;
+ }
// Apply limit
plan_builder = plan_builder
@@ -159,6 +160,17 @@ impl LogQueryPlanner {
}
.build(),
),
+ log_query::ContentFilter::Exist => {
+ Ok(col(&column_filter.column_name).is_not_null())
+ }
+ log_query::ContentFilter::Between(lower, upper) => {
+ Ok(col(&column_filter.column_name)
+ .gt_eq(lit(ScalarValue::Utf8(Some(escape_like_pattern(lower)))))
+ .and(
+ col(&column_filter.column_name)
+ .lt_eq(lit(ScalarValue::Utf8(Some(escape_like_pattern(upper))))),
+ ))
+ }
log_query::ContentFilter::Compound(..) => Err::<Expr, _>(
UnimplementedSnafu {
feature: "compound filter",
@@ -267,7 +279,7 @@ mod tests {
end: Some("2021-01-02T00:00:00Z".to_string()),
span: None,
},
- columns: vec![ColumnFilters {
+ filters: vec![ColumnFilters {
column_name: "message".to_string(),
filters: vec![ContentFilter::Contains("error".to_string())],
}],
@@ -276,13 +288,14 @@ mod tests {
fetch: Some(100),
},
context: Context::None,
+ columns: vec![],
+ exprs: vec![],
};
let plan = planner.query_to_plan(log_query).await.unwrap();
- let expected = "Limit: skip=0, fetch=100 [message:Utf8]\
-\n Projection: greptime.public.test_table.message [message:Utf8]\
-\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
-\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]";
+ let expected = "Limit: skip=0, fetch=100 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
+\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
+\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]";
assert_eq!(plan.display_indent_schema().to_string(), expected);
}
@@ -380,7 +393,7 @@ mod tests {
end: Some("2021-01-02T00:00:00Z".to_string()),
span: None,
},
- columns: vec![ColumnFilters {
+ filters: vec![ColumnFilters {
column_name: "message".to_string(),
filters: vec![ContentFilter::Contains("error".to_string())],
}],
@@ -389,13 +402,14 @@ mod tests {
fetch: None,
},
context: Context::None,
+ columns: vec![],
+ exprs: vec![],
};
let plan = planner.query_to_plan(log_query).await.unwrap();
- let expected = "Limit: skip=10, fetch=1000 [message:Utf8]\
-\n Projection: greptime.public.test_table.message [message:Utf8]\
-\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
-\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]";
+ let expected = "Limit: skip=10, fetch=1000 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
+\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
+\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]";
assert_eq!(plan.display_indent_schema().to_string(), expected);
}
@@ -413,7 +427,7 @@ mod tests {
end: Some("2021-01-02T00:00:00Z".to_string()),
span: None,
},
- columns: vec![ColumnFilters {
+ filters: vec![ColumnFilters {
column_name: "message".to_string(),
filters: vec![ContentFilter::Contains("error".to_string())],
}],
@@ -422,13 +436,14 @@ mod tests {
fetch: None,
},
context: Context::None,
+ columns: vec![],
+ exprs: vec![],
};
let plan = planner.query_to_plan(log_query).await.unwrap();
- let expected = "Limit: skip=0, fetch=1000 [message:Utf8]\
-\n Projection: greptime.public.test_table.message [message:Utf8]\
-\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
-\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]";
+ let expected = "Limit: skip=0, fetch=1000 [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
+\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\
+\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]";
assert_eq!(plan.display_indent_schema().to_string(), expected);
}
|
feat
|
refine log query AST (#5316)
|
83c1b485eae99abf0e12b0d9eeee8105c0085a2e
|
2024-03-26 13:24:06
|
Ruihang Xia
|
chore: limit OpenDAL's feature gates (#3584)
| false
|
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index 4f7169597e5a..941955a347e9 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -20,7 +20,14 @@ md5 = "0.7"
moka = { workspace = true, features = ["future"] }
opendal = { version = "0.45", features = [
"layers-tracing",
-] }
+ "rustls",
+ "services-azblob",
+ "services-fs",
+ "services-gcs",
+ "services-http",
+ "services-oss",
+ "services-s3",
+], default-features = false }
prometheus.workspace = true
snafu.workspace = true
uuid.workspace = true
@@ -29,4 +36,5 @@ uuid.workspace = true
anyhow = "1.0"
common-telemetry.workspace = true
common-test-util.workspace = true
+opendal = { version = "0.45", features = ["services-memory"] }
tokio.workspace = true
|
chore
|
limit OpenDAL's feature gates (#3584)
|
e5ba3d1708dea171f7a0548b2ca165db143c5ecd
|
2023-08-24 08:59:08
|
Ruihang Xia
|
feat: rewrite the dist analyzer (#2238)
| false
|
diff --git a/src/query/src/dist_plan.rs b/src/query/src/dist_plan.rs
index ca1480c1bd82..830f3b14cf72 100644
--- a/src/query/src/dist_plan.rs
+++ b/src/query/src/dist_plan.rs
@@ -16,7 +16,6 @@ mod analyzer;
mod commutativity;
mod merge_scan;
mod planner;
-mod utils;
pub use analyzer::DistPlannerAnalyzer;
pub use merge_scan::MergeScanLogicalPlan;
diff --git a/src/query/src/dist_plan/analyzer.rs b/src/query/src/dist_plan/analyzer.rs
index 2ace7f2ea907..8ee7a59f821c 100644
--- a/src/query/src/dist_plan/analyzer.rs
+++ b/src/query/src/dist_plan/analyzer.rs
@@ -12,12 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::{Arc, Mutex};
-
use datafusion::datasource::DefaultTableSource;
+use datafusion::error::Result as DfResult;
use datafusion_common::config::ConfigOptions;
-use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeVisitor, VisitRecursion};
-use datafusion_expr::{Extension, LogicalPlan};
+use datafusion_common::tree_node::{RewriteRecursion, TreeNode, TreeNodeRewriter};
+use datafusion_expr::LogicalPlan;
use datafusion_optimizer::analyzer::AnalyzerRule;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::metadata::TableType;
@@ -27,7 +26,6 @@ use crate::dist_plan::commutativity::{
partial_commutative_transformer, Categorizer, Commutativity,
};
use crate::dist_plan::merge_scan::MergeScanLogicalPlan;
-use crate::dist_plan::utils;
pub struct DistPlannerAnalyzer;
@@ -41,233 +39,196 @@ impl AnalyzerRule for DistPlannerAnalyzer {
plan: LogicalPlan,
_config: &ConfigOptions,
) -> datafusion_common::Result<LogicalPlan> {
- // (1) transform up merge scan
- let mut visitor = CommutativeVisitor::new();
- let _ = plan.visit(&mut visitor)?;
- let state = ExpandState::new();
- let plan = plan.transform_down(&|plan| Self::expand(plan, &visitor, &state))?;
-
- // (2) remove placeholder merge scan
- let plan = plan.transform(&Self::remove_placeholder_merge_scan)?;
-
- Ok(plan)
+ let mut rewriter = PlanRewriter::default();
+ plan.rewrite(&mut rewriter)
}
}
-impl DistPlannerAnalyzer {
- /// Add [MergeScanLogicalPlan] before the table scan
- #[allow(dead_code)]
- fn add_merge_scan(plan: LogicalPlan) -> datafusion_common::Result<Transformed<LogicalPlan>> {
- Ok(match plan {
- LogicalPlan::TableScan(table_scan) => {
- let ext_plan = LogicalPlan::Extension(Extension {
- node: Arc::new(MergeScanLogicalPlan::new(
- LogicalPlan::TableScan(table_scan),
- true,
- )),
- });
- Transformed::Yes(ext_plan)
- }
- _ => Transformed::No(plan),
- })
- }
-
- /// Remove placeholder [MergeScanLogicalPlan]
- fn remove_placeholder_merge_scan(
- plan: LogicalPlan,
- ) -> datafusion_common::Result<Transformed<LogicalPlan>> {
- Ok(match &plan {
- LogicalPlan::Extension(extension)
- if extension.node.name() == MergeScanLogicalPlan::name() =>
- {
- let merge_scan = extension
- .node
- .as_any()
- .downcast_ref::<MergeScanLogicalPlan>()
- .unwrap();
- if merge_scan.is_placeholder() {
- Transformed::Yes(merge_scan.input().clone())
- } else {
- Transformed::No(plan)
- }
- }
- _ => Transformed::No(plan),
- })
- }
-
- /// Expand stages on the stop node
- fn expand(
- mut plan: LogicalPlan,
- visitor: &CommutativeVisitor,
- state: &ExpandState,
- ) -> datafusion_common::Result<Transformed<LogicalPlan>> {
- if state.is_transformed() {
- // only transform once
- return Ok(Transformed::No(plan));
- }
- if let Some(stop_node) = visitor.stop_node && utils::hash_plan(&plan) != stop_node {
- // only act with the stop node or the root (the first node seen by this closure) if no stop node
- return Ok(Transformed::No(plan));
- }
-
- if visitor.stop_node.is_some() {
- // insert merge scan between the stop node and its child
- let children = plan.inputs();
- let mut new_children = Vec::with_capacity(children.len());
- for child in children {
- let mut new_child =
- MergeScanLogicalPlan::new(child.clone(), false).into_logical_plan();
- // expand stages
- for new_stage in &visitor.next_stage {
- new_child = new_stage.with_new_inputs(&[new_child])?
- }
- new_children.push(new_child);
- }
- plan = plan.with_new_inputs(&new_children)?;
- } else {
- // otherwise add merge scan as the new root
- plan = MergeScanLogicalPlan::new(plan, false).into_logical_plan();
- // expand stages
- for new_stage in &visitor.next_stage {
- plan = new_stage.with_new_inputs(&[plan])?
- }
- }
-
- state.set_transformed();
- Ok(Transformed::Yes(plan))
- }
+/// Status of the rewriter to mark if the current pass is expanded
+#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
+enum RewriterStatus {
+ #[default]
+ Unexpanded,
+ Expanded,
}
-struct ExpandState {
- transformed: Mutex<bool>,
+#[derive(Debug, Default)]
+struct PlanRewriter {
+ /// Current level in the tree
+ level: usize,
+ /// Simulated stack for the `rewrite` recursion
+ stack: Vec<(LogicalPlan, usize)>,
+ /// Stages to be expanded
+ stage: Vec<LogicalPlan>,
+ status: RewriterStatus,
+ /// Partition columns of the table in the current pass
+ partition_cols: Option<Vec<String>>,
}
-impl ExpandState {
- pub fn new() -> Self {
- Self {
- transformed: Mutex::new(false),
- }
- }
-
- pub fn is_transformed(&self) -> bool {
- *self.transformed.lock().unwrap()
+impl PlanRewriter {
+ fn get_parent(&self) -> Option<&LogicalPlan> {
+ // level starts from 1, so it's safe to subtract 1
+ self.stack
+ .iter()
+ .rev()
+ .find(|(_, level)| *level == self.level - 1)
+ .map(|(node, _)| node)
}
- /// Set the state to transformed
- pub fn set_transformed(&self) {
- *self.transformed.lock().unwrap() = true;
- }
-}
-
-#[derive(Debug)]
-struct CommutativeVisitor {
- next_stage: Vec<LogicalPlan>,
- // hash of the stop node
- stop_node: Option<u64>,
- /// Partition columns of current visiting table
- current_partition_cols: Option<Vec<String>>,
-}
-
-impl TreeNodeVisitor for CommutativeVisitor {
- type N = LogicalPlan;
-
- fn pre_visit(&mut self, plan: &LogicalPlan) -> datafusion_common::Result<VisitRecursion> {
- // find the first merge scan and stop traversing down
- // todo: check if it works for join
- Ok(match plan {
- LogicalPlan::TableScan(table_scan) => {
- // TODO(ruihang): spawn a sub visitor to retrieve partition columns
- if let Some(source) = table_scan
- .source
- .as_any()
- .downcast_ref::<DefaultTableSource>()
- {
- if let Some(provider) = source
- .table_provider
- .as_any()
- .downcast_ref::<DfTableProviderAdapter>()
- {
- if provider.table().table_type() == TableType::Base {
- let info = provider.table().table_info();
- let partition_key_indices = info.meta.partition_key_indices.clone();
- let schema = info.meta.schema.clone();
- let partition_cols = partition_key_indices
- .into_iter()
- .map(|index| schema.column_name_by_index(index).to_string())
- .collect::<Vec<String>>();
- self.current_partition_cols = Some(partition_cols);
- }
- }
- }
- VisitRecursion::Continue
- }
- _ => VisitRecursion::Continue,
- })
- }
-
- fn post_visit(&mut self, plan: &LogicalPlan) -> datafusion_common::Result<VisitRecursion> {
+ /// Return true if we should stop and expand. The input plan is the parent node of the current node.
+ fn should_expand(&mut self, plan: &LogicalPlan) -> bool {
if DFLogicalSubstraitConvertor.encode(plan).is_err() {
common_telemetry::info!(
"substrait error: {:?}",
DFLogicalSubstraitConvertor.encode(plan)
);
- self.stop_node = Some(utils::hash_plan(plan));
- return Ok(VisitRecursion::Stop);
+ return true;
}
match Categorizer::check_plan(plan) {
Commutativity::Commutative => {}
Commutativity::PartialCommutative => {
if let Some(plan) = partial_commutative_transformer(plan) {
- self.next_stage.push(plan)
+ self.stage.push(plan)
}
}
Commutativity::ConditionalCommutative(transformer) => {
if let Some(transformer) = transformer
&& let Some(plan) = transformer(plan) {
- self.next_stage.push(plan)
+ self.stage.push(plan)
}
},
Commutativity::TransformedCommutative(transformer) => {
if let Some(transformer) = transformer
&& let Some(plan) = transformer(plan) {
- self.next_stage.push(plan)
- }
- },
- Commutativity::CheckPartition => {
- if let Some(partition_cols) = &self.current_partition_cols
- && partition_cols.is_empty() {
- // no partition columns, and can be encoded skip
- return Ok(VisitRecursion::Continue);
- } else {
- self.stop_node = Some(utils::hash_plan(plan));
- return Ok(VisitRecursion::Stop);
+ self.stage.push(plan)
}
},
- Commutativity::NonCommutative
+ Commutativity::CheckPartition
+ | Commutativity::NonCommutative
| Commutativity::Unimplemented
| Commutativity::Unsupported => {
- self.stop_node = Some(utils::hash_plan(plan));
- return Ok(VisitRecursion::Stop);
+ return true;
+ }
+ }
+
+ false
+ }
+
+ fn is_expanded(&self) -> bool {
+ self.status == RewriterStatus::Expanded
+ }
+
+ fn set_expanded(&mut self) {
+ self.status = RewriterStatus::Expanded;
+ }
+
+ fn set_unexpanded(&mut self) {
+ self.status = RewriterStatus::Unexpanded;
+ }
+
+ fn maybe_set_partitions(&mut self, plan: &LogicalPlan) {
+ if self.partition_cols.is_some() {
+ // only need to set once
+ return;
+ }
+
+ if let LogicalPlan::TableScan(table_scan) = plan {
+ if let Some(source) = table_scan
+ .source
+ .as_any()
+ .downcast_ref::<DefaultTableSource>()
+ {
+ if let Some(provider) = source
+ .table_provider
+ .as_any()
+ .downcast_ref::<DfTableProviderAdapter>()
+ {
+ if provider.table().table_type() == TableType::Base {
+ let info = provider.table().table_info();
+ let partition_key_indices = info.meta.partition_key_indices.clone();
+ let schema = info.meta.schema.clone();
+ let partition_cols = partition_key_indices
+ .into_iter()
+ .map(|index| schema.column_name_by_index(index).to_string())
+ .collect::<Vec<String>>();
+ self.partition_cols = Some(partition_cols);
+ }
+ }
}
}
+ }
- Ok(VisitRecursion::Continue)
+ /// pop one stack item and reduce the level by 1
+ fn pop_stack(&mut self) {
+ self.level -= 1;
+ self.stack.pop();
}
}
-impl CommutativeVisitor {
- pub fn new() -> Self {
- Self {
- next_stage: vec![],
- stop_node: None,
- current_partition_cols: None,
+impl TreeNodeRewriter for PlanRewriter {
+ type N = LogicalPlan;
+
+ /// descend
+ fn pre_visit<'a>(&'a mut self, node: &'a Self::N) -> DfResult<RewriteRecursion> {
+ self.level += 1;
+ self.stack.push((node.clone(), self.level));
+ // descending will clear the stage
+ self.stage.clear();
+ self.set_unexpanded();
+ self.partition_cols = None;
+ Ok(RewriteRecursion::Continue)
+ }
+
+ /// ascend
+ ///
+ /// Be sure to call `pop_stack` before returning
+ fn mutate(&mut self, node: Self::N) -> DfResult<Self::N> {
+ // only expand once on each ascent
+ if self.is_expanded() {
+ self.pop_stack();
+ return Ok(node);
}
+
+ self.maybe_set_partitions(&node);
+
+ let Some(parent) = self.get_parent() else {
+ // add merge scan as the new root
+ let mut node = MergeScanLogicalPlan::new(node, false).into_logical_plan();
+ // expand stages
+ for new_stage in self.stage.drain(..) {
+ node = new_stage.with_new_inputs(&[node])?
+ }
+ self.set_expanded();
+
+ self.pop_stack();
+ return Ok(node);
+ };
+
+ // TODO(ruihang): avoid this clone
+ if self.should_expand(&parent.clone()) {
+ // TODO(ruihang): does this work for nodes with multiple children?
+ // replace the current node with expanded one
+ let mut node = MergeScanLogicalPlan::new(node, false).into_logical_plan();
+ // expand stages
+ for new_stage in self.stage.drain(..) {
+ node = new_stage.with_new_inputs(&[node])?
+ }
+ self.set_expanded();
+
+ self.pop_stack();
+ return Ok(node);
+ }
+
+ self.pop_stack();
+ Ok(node)
}
}
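
A minimal, self-contained sketch of the stack-based parent lookup used by the rewriter above: each node is pushed with its depth on descent, and on ascent the parent is the nearest stack entry one level up. The `Node` and `Tracker` types are illustrative assumptions in plain std Rust, not DataFusion's `TreeNodeRewriter` API.

struct Node {
    name: &'static str,
    children: Vec<Node>,
}

#[derive(Default)]
struct Tracker {
    level: usize,
    stack: Vec<(&'static str, usize)>,
}

impl Tracker {
    fn pre_visit(&mut self, node: &Node) {
        self.level += 1;
        self.stack.push((node.name, self.level));
    }

    /// Parent of the node currently being ascended from, if any.
    fn parent(&self) -> Option<&'static str> {
        self.stack
            .iter()
            .rev()
            .find(|(_, level)| *level == self.level - 1)
            .map(|(name, _)| *name)
    }

    fn post_visit(&mut self) {
        self.level -= 1;
        self.stack.pop();
    }
}

fn walk(node: &Node, t: &mut Tracker, out: &mut Vec<(&'static str, Option<&'static str>)>) {
    t.pre_visit(node);
    for child in &node.children {
        walk(child, t, out);
    }
    // Post-order: record each node together with its parent, as seen on ascent.
    out.push((node.name, t.parent()));
    t.post_visit();
}

fn main() {
    let scan = Node { name: "TableScan", children: vec![] };
    let filter = Node { name: "Filter", children: vec![scan] };
    let limit = Node { name: "Limit", children: vec![filter] };

    let mut out = Vec::new();
    walk(&limit, &mut Tracker::default(), &mut out);
    assert_eq!(
        out,
        vec![
            ("TableScan", Some("Filter")),
            ("Filter", Some("Limit")),
            ("Limit", None),
        ]
    );
}
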
#[cfg(test)]
mod test {
+ use std::sync::Arc;
+
use datafusion::datasource::DefaultTableSource;
use datafusion_expr::{avg, col, lit, Expr, LogicalPlanBuilder};
use table::table::adapter::DfTableProviderAdapter;
diff --git a/src/query/src/dist_plan/utils.rs b/src/query/src/dist_plan/utils.rs
deleted file mode 100644
index 78d2a85b1e56..000000000000
--- a/src/query/src/dist_plan/utils.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::hash::{Hash, Hasher};
-
-use ahash::AHasher;
-use datafusion_expr::LogicalPlan;
-
-/// Calculate u64 hash for a [LogicalPlan].
-pub fn hash_plan(plan: &LogicalPlan) -> u64 {
- let mut hasher = AHasher::default();
- plan.hash(&mut hasher);
- hasher.finish()
-}
-
-#[cfg(test)]
-mod test {
- use datafusion_expr::LogicalPlanBuilder;
-
- use super::*;
-
- #[test]
- fn hash_two_plan() {
- let plan1 = LogicalPlanBuilder::empty(false).build().unwrap();
- let plan2 = LogicalPlanBuilder::empty(false)
- .explain(false, false)
- .unwrap()
- .build()
- .unwrap();
-
- assert_eq!(hash_plan(&plan1), hash_plan(&plan1));
- assert_ne!(hash_plan(&plan1), hash_plan(&plan2));
- }
-}
diff --git a/tests/cases/distributed/explain/join_10_tables.result b/tests/cases/distributed/explain/join_10_tables.result
new file mode 100644
index 000000000000..23ba1f31fec6
--- /dev/null
+++ b/tests/cases/distributed/explain/join_10_tables.result
@@ -0,0 +1,206 @@
+create table t_1 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_2 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_3 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_4 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_5 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_6 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_7 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_8 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_9 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+create table t_10 (ts timestamp time index, vin string, val int);
+
+Affected Rows: 0
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (Hash.*) REDACTED
+-- SQLNESS REPLACE (peer-.*) REDACTED
+explain
+select *
+from
+ t_1
+ INNER JOIN t_2 ON t_2.ts = t_1.ts
+ AND t_2.vin = t_1.vin
+ INNER JOIN t_3 ON t_3.ts = t_2.ts
+ AND t_3.vin = t_2.vin
+ INNER JOIN t_4 ON t_4.ts = t_3.ts
+ AND t_4.vin = t_3.vin
+ INNER JOIN t_5 ON t_5.ts = t_4.ts
+ AND t_5.vin = t_4.vin
+ INNER JOIN t_6 ON t_6.ts = t_5.ts
+ AND t_6.vin = t_5.vin
+ INNER JOIN t_7 ON t_7.ts = t_6.ts
+ AND t_7.vin = t_6.vin
+ INNER JOIN t_8 ON t_8.ts = t_7.ts
+ AND t_8.vin = t_7.vin
+ INNER JOIN t_9 ON t_9.ts = t_8.ts
+ AND t_9.vin = t_8.vin
+ INNER JOIN t_10 ON t_10.ts = t_9.ts
+ AND t_10.vin = t_9.vin
+where
+ t_1.vin is not null
+order by t_1.ts desc
+limit 1;
+
++-+-+
+| plan_type_| plan_|
++-+-+
+| logical_plan_| Limit: skip=0, fetch=1_|
+|_|_Sort: t_1.ts DESC NULLS FIRST, fetch=1_|
+|_|_Inner Join: t_9.ts = t_10.ts, t_9.vin = t_10.vin_|
+|_|_Inner Join: t_8.ts = t_9.ts, t_8.vin = t_9.vin_|
+|_|_Inner Join: t_7.ts = t_8.ts, t_7.vin = t_8.vin_|
+|_|_Inner Join: t_6.ts = t_7.ts, t_6.vin = t_7.vin_|
+|_|_Inner Join: t_5.ts = t_6.ts, t_5.vin = t_6.vin_|
+|_|_Inner Join: t_4.ts = t_5.ts, t_4.vin = t_5.vin_|
+|_|_Inner Join: t_3.ts = t_4.ts, t_3.vin = t_4.vin_|
+|_|_Inner Join: t_2.ts = t_3.ts, t_2.vin = t_3.vin_|
+|_|_Inner Join: t_1.ts = t_2.ts, t_1.vin = t_2.vin_|
+|_|_Filter: t_1.vin IS NOT NULL_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_Filter: t_2.vin IS NOT NULL_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+|_|_MergeScan [is_placeholder=false]_|
+| physical_plan | GlobalLimitExec: skip=0, fetch=1_|
+|_|_SortPreservingMergeExec: [ts@0 DESC], fetch=1_|
+|_|_SortExec: fetch=1, expr=[ts@0 DESC]_|
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_FilterExec: vin@1 IS NOT NULL_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_FilterExec: vin@1 IS NOT NULL_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_CoalesceBatchesExec: target_batch_size=8192_|
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_MergeScanExec: peers=[REDACTED
+|_|_|
++-+-+
+
+drop table t_1;
+
+Affected Rows: 1
+
+drop table t_2;
+
+Affected Rows: 1
+
+drop table t_3;
+
+Affected Rows: 1
+
+drop table t_4;
+
+Affected Rows: 1
+
+drop table t_5;
+
+Affected Rows: 1
+
+drop table t_6;
+
+Affected Rows: 1
+
+drop table t_7;
+
+Affected Rows: 1
+
+drop table t_8;
+
+Affected Rows: 1
+
+drop table t_9;
+
+Affected Rows: 1
+
+drop table t_10;
+
+Affected Rows: 1
+
diff --git a/tests/cases/distributed/explain/join_10_tables.sql b/tests/cases/distributed/explain/join_10_tables.sql
new file mode 100644
index 000000000000..6f756ade6bb1
--- /dev/null
+++ b/tests/cases/distributed/explain/join_10_tables.sql
@@ -0,0 +1,53 @@
+create table t_1 (ts timestamp time index, vin string, val int);
+create table t_2 (ts timestamp time index, vin string, val int);
+create table t_3 (ts timestamp time index, vin string, val int);
+create table t_4 (ts timestamp time index, vin string, val int);
+create table t_5 (ts timestamp time index, vin string, val int);
+create table t_6 (ts timestamp time index, vin string, val int);
+create table t_7 (ts timestamp time index, vin string, val int);
+create table t_8 (ts timestamp time index, vin string, val int);
+create table t_9 (ts timestamp time index, vin string, val int);
+create table t_10 (ts timestamp time index, vin string, val int);
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (Hash.*) REDACTED
+-- SQLNESS REPLACE (peer-.*) REDACTED
+explain
+select *
+from
+ t_1
+ INNER JOIN t_2 ON t_2.ts = t_1.ts
+ AND t_2.vin = t_1.vin
+ INNER JOIN t_3 ON t_3.ts = t_2.ts
+ AND t_3.vin = t_2.vin
+ INNER JOIN t_4 ON t_4.ts = t_3.ts
+ AND t_4.vin = t_3.vin
+ INNER JOIN t_5 ON t_5.ts = t_4.ts
+ AND t_5.vin = t_4.vin
+ INNER JOIN t_6 ON t_6.ts = t_5.ts
+ AND t_6.vin = t_5.vin
+ INNER JOIN t_7 ON t_7.ts = t_6.ts
+ AND t_7.vin = t_6.vin
+ INNER JOIN t_8 ON t_8.ts = t_7.ts
+ AND t_8.vin = t_7.vin
+ INNER JOIN t_9 ON t_9.ts = t_8.ts
+ AND t_9.vin = t_8.vin
+ INNER JOIN t_10 ON t_10.ts = t_9.ts
+ AND t_10.vin = t_9.vin
+where
+ t_1.vin is not null
+order by t_1.ts desc
+limit 1;
+
+drop table t_1;
+drop table t_2;
+drop table t_3;
+drop table t_4;
+drop table t_5;
+drop table t_6;
+drop table t_7;
+drop table t_8;
+drop table t_9;
+drop table t_10;
diff --git a/tests/cases/distributed/optimizer/filter_push_down.result b/tests/cases/distributed/optimizer/filter_push_down.result
index 40884864ef0c..94ab90c33ded 100644
--- a/tests/cases/distributed/optimizer/filter_push_down.result
+++ b/tests/cases/distributed/optimizer/filter_push_down.result
@@ -56,7 +56,22 @@ SELECT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=1 WHERE i1.i>
SELECT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=0 WHERE i2.i IS NOT NULL ORDER BY 2;
-Error: 1003(Internal), This feature is not implemented: Unsupported expression: IsNotNull(Column(Column { relation: Some(Full { catalog: "greptime", schema: "public", table: "integers" }), name: "i" }))
++---+---+
+| i | i |
++---+---+
+| 1 | 1 |
+| 2 | 1 |
+| 3 | 1 |
+| | 1 |
+| 1 | 2 |
+| 2 | 2 |
+| 3 | 2 |
+| | 2 |
+| 1 | 3 |
+| 2 | 3 |
+| 3 | 3 |
+| | 3 |
++---+---+
SELECT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=0 WHERE i2.i>1 ORDER BY 2;
@@ -75,7 +90,22 @@ SELECT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=0 WHERE i2.i>
SELECT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=0 WHERE CASE WHEN i2.i IS NULL THEN False ELSE True END ORDER BY 2;
-Error: 1003(Internal), This feature is not implemented: Unsupported expression: IsNotNull(Column(Column { relation: Some(Full { catalog: "greptime", schema: "public", table: "integers" }), name: "i" }))
++---+---+
+| i | i |
++---+---+
+| 1 | 1 |
+| 2 | 1 |
+| 3 | 1 |
+| | 1 |
+| 1 | 2 |
+| 2 | 2 |
+| 3 | 2 |
+| | 2 |
+| 1 | 3 |
+| 2 | 3 |
+| 3 | 3 |
+| | 3 |
++---+---+
SELECT DISTINCT i1.i,i2.i FROM integers i1 LEFT OUTER JOIN integers i2 ON 1=0 WHERE i2.i IS NULL ORDER BY 1;
|
feat
|
rewrite the dist analyzer (#2238)
|
39e74dc87e5d8162f0b2deb2113b98168124be0d
|
2023-07-24 12:59:24
|
liyang
|
chore: rename tag github env (#2019)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index eae7af690192..110b9a895dcb 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -133,7 +133,7 @@ jobs:
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
- echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
+ echo "TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag
shell: bash
@@ -317,7 +317,7 @@ jobs:
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
- echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
+ echo "TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Configure tag
shell: bash
|
chore
|
rename tag github env (#2019)
|
656050722c889a3353bb5b30ebf85d6c2a7c319f
|
2024-07-31 13:11:49
|
dennis zhuang
|
fix: overflow when parsing default value with negative numbers (#4459)
| false
|
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index 196e3b9c9863..7568fcb24f2a 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -371,7 +371,20 @@ fn parse_column_default_constraint(
// Always use lowercase for function expression
ColumnDefaultConstraint::Function(func.to_lowercase())
}
+
ColumnOption::Default(Expr::UnaryOp { op, expr }) => {
+ // Specialized handling of numerical inputs to prevent
+ // overflow errors during the parsing of negative numbers.
+ // See https://github.com/GreptimeTeam/greptimedb/issues/4351
+ if let (UnaryOperator::Minus, Expr::Value(SqlValue::Number(n, _))) =
+ (op, expr.as_ref())
+ {
+ return Ok(Some(ColumnDefaultConstraint::Value(sql_number_to_value(
+ data_type,
+ &format!("-{n}"),
+ )?)));
+ }
+
if let Expr::Value(v) = &**expr {
let value = sql_value_to_value(column_name, data_type, v, timezone, Some(*op))?;
ColumnDefaultConstraint::Value(value)
@@ -1050,6 +1063,28 @@ mod tests {
constraint,
Some(ColumnDefaultConstraint::Value(Value::Boolean(true)))
);
+
+ // Test negative number
+ let opts = vec![ColumnOptionDef {
+ name: None,
+ option: ColumnOption::Default(Expr::UnaryOp {
+ op: UnaryOperator::Minus,
+ expr: Box::new(Expr::Value(SqlValue::Number("32768".to_string(), false))),
+ }),
+ }];
+
+ let constraint = parse_column_default_constraint(
+ "coll",
+ &ConcreteDataType::int16_datatype(),
+ &opts,
+ None,
+ )
+ .unwrap();
+
+ assert_matches!(
+ constraint,
+ Some(ColumnDefaultConstraint::Value(Value::Int16(-32768)))
+ );
}
#[test]
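
A minimal sketch of why the sign has to be attached before parsing, as in the format!("-{n}") branch above: `i16::MAX` is 32767, so parsing the unsigned literal first and negating afterwards overflows for `i16::MIN`. This is plain std Rust; the helper name is illustrative, not the sql crate's API.

fn parse_negative_i16(digits: &str) -> Result<i16, std::num::ParseIntError> {
    // Parsing "32768" alone would fail with an out-of-range error,
    // so build the signed literal first and parse it in one step.
    format!("-{digits}").parse::<i16>()
}

fn main() {
    // The two-step approach fails for the minimum value...
    assert!("32768".parse::<i16>().is_err());
    // ...while parsing the signed literal directly succeeds.
    assert_eq!(parse_negative_i16("32768"), Ok(-32768));
    assert_eq!(parse_negative_i16("1"), Ok(-1));
}
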
diff --git a/tests/cases/standalone/common/create/create.result b/tests/cases/standalone/common/create/create.result
index 237873dee110..86a89483e82a 100644
--- a/tests/cases/standalone/common/create/create.result
+++ b/tests/cases/standalone/common/create/create.result
@@ -54,6 +54,23 @@ CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
Error: 1004(InvalidArguments), Invalid table name: N.~
+CREATE TABLE neg_default_value_min(i TIMESTAMP TIME INDEX, j SMALLINT DEFAULT -32768);
+
+Affected Rows: 0
+
+DESC TABLE neg_default_value_min;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| i | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| j | Int16 | | YES | -32768 | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
+DROP TABLE neg_default_value_min;
+
+Affected Rows: 0
+
DESC TABLE integers;
+--------+----------------------+-----+------+---------+---------------+
diff --git a/tests/cases/standalone/common/create/create.sql b/tests/cases/standalone/common/create/create.sql
index 8aa1f48606e7..f5ae083318b2 100644
--- a/tests/cases/standalone/common/create/create.sql
+++ b/tests/cases/standalone/common/create/create.sql
@@ -26,6 +26,12 @@ CREATE TABLE test2 (i INTEGER, j TIMESTAMP TIME INDEX);
CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
+CREATE TABLE neg_default_value_min(i TIMESTAMP TIME INDEX, j SMALLINT DEFAULT -32768);
+
+DESC TABLE neg_default_value_min;
+
+DROP TABLE neg_default_value_min;
+
DESC TABLE integers;
DESC TABLE test1;
|
fix
|
overflow when parsing default value with negative numbers (#4459)
|
f2cc912c87a6199e42fd6b11c3cab88562105924
|
2023-04-21 11:10:58
|
Weny Xu
|
feat: implement ParquetFileReaderFactory (#1423)
| false
|
diff --git a/src/common/datasource/src/file_format/parquet.rs b/src/common/datasource/src/file_format/parquet.rs
index 8847018608d6..1c79f4aca6c9 100644
--- a/src/common/datasource/src/file_format/parquet.rs
+++ b/src/common/datasource/src/file_format/parquet.rs
@@ -12,11 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::result;
+use std::sync::Arc;
+
use arrow_schema::Schema;
use async_trait::async_trait;
+use datafusion::error::Result as DatafusionResult;
use datafusion::parquet::arrow::async_reader::AsyncFileReader;
use datafusion::parquet::arrow::parquet_to_arrow_schema;
-use object_store::ObjectStore;
+use datafusion::parquet::errors::{ParquetError, Result as ParquetResult};
+use datafusion::parquet::file::metadata::ParquetMetaData;
+use datafusion::physical_plan::file_format::{FileMeta, ParquetFileReaderFactory};
+use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
+use futures::future::BoxFuture;
+use object_store::{ObjectStore, Reader};
use snafu::ResultExt;
use crate::error::{self, Result};
@@ -49,6 +58,87 @@ impl FileFormat for ParquetFormat {
}
}
+#[derive(Debug, Clone)]
+pub struct DefaultParquetFileReaderFactory {
+ object_store: ObjectStore,
+}
+
+/// Returns an AsyncFileReader factory
+impl DefaultParquetFileReaderFactory {
+ pub fn new(object_store: ObjectStore) -> Self {
+ Self { object_store }
+ }
+}
+
+impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
+ // TODO(weny): Support [`metadata_size_hint`].
+ // The upstream has an implementation that supports [`metadata_size_hint`],
+ // however it is coupled with Box<dyn ObjectStore>.
+ fn create_reader(
+ &self,
+ _partition_index: usize,
+ file_meta: FileMeta,
+ _metadata_size_hint: Option<usize>,
+ _metrics: &ExecutionPlanMetricsSet,
+ ) -> DatafusionResult<Box<dyn AsyncFileReader + Send>> {
+ let path = file_meta.location().to_string();
+ let object_store = self.object_store.clone();
+
+ Ok(Box::new(LazyParquetFileReader::new(object_store, path)))
+ }
+}
+
+pub struct LazyParquetFileReader {
+ object_store: ObjectStore,
+ reader: Option<Reader>,
+ path: String,
+}
+
+impl LazyParquetFileReader {
+ pub fn new(object_store: ObjectStore, path: String) -> Self {
+ LazyParquetFileReader {
+ object_store,
+ path,
+ reader: None,
+ }
+ }
+
+ /// Initialize the reader on first use; any failure is returned as an error from the future.
+ async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
+ if self.reader.is_none() {
+ let reader = self.object_store.reader(&self.path).await?;
+ self.reader = Some(reader);
+ }
+
+ Ok(())
+ }
+}
+
+impl AsyncFileReader for LazyParquetFileReader {
+ fn get_bytes(
+ &mut self,
+ range: std::ops::Range<usize>,
+ ) -> BoxFuture<'_, ParquetResult<bytes::Bytes>> {
+ Box::pin(async move {
+ self.maybe_initialize()
+ .await
+ .map_err(|e| ParquetError::External(Box::new(e)))?;
+ // Safety: Must be initialized
+ self.reader.as_mut().unwrap().get_bytes(range).await
+ })
+ }
+
+ fn get_metadata(&mut self) -> BoxFuture<'_, ParquetResult<Arc<ParquetMetaData>>> {
+ Box::pin(async move {
+ self.maybe_initialize()
+ .await
+ .map_err(|e| ParquetError::External(Box::new(e)))?;
+ // Safety: Must be initialized
+ self.reader.as_mut().unwrap().get_metadata().await
+ })
+ }
+}
+
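
A minimal, self-contained sketch of the lazy-initialization pattern above, using `std::fs::File` in place of an object-store reader: the handle is opened on first use and reused afterwards. The types and the `Cargo.toml` path are illustrative assumptions, not the real `LazyParquetFileReader`.

use std::fs::File;
use std::io::{self, Read};

struct LazyFileReader {
    path: String,
    file: Option<File>,
}

impl LazyFileReader {
    fn new(path: impl Into<String>) -> Self {
        Self { path: path.into(), file: None }
    }

    /// Open the file on first use; later calls reuse the cached handle.
    fn maybe_initialize(&mut self) -> io::Result<&mut File> {
        if self.file.is_none() {
            self.file = Some(File::open(&self.path)?);
        }
        // Unwrap is safe: the branch above guarantees the option is filled.
        Ok(self.file.as_mut().unwrap())
    }

    fn read_all(&mut self) -> io::Result<Vec<u8>> {
        let mut buf = Vec::new();
        self.maybe_initialize()?.read_to_end(&mut buf)?;
        Ok(buf)
    }
}

fn main() -> io::Result<()> {
    // Assumes a Cargo.toml exists in the working directory.
    let mut reader = LazyFileReader::new("Cargo.toml");
    let first = reader.read_all()?;
    // The second call reuses the open handle; its cursor is already at the end,
    // so nothing more is read.
    let second = reader.read_all()?;
    assert!(!first.is_empty());
    assert!(second.is_empty());
    Ok(())
}
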
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/common/datasource/src/file_format/tests.rs b/src/common/datasource/src/file_format/tests.rs
index 7df3ffd21644..36036c73d387 100644
--- a/src/common/datasource/src/file_format/tests.rs
+++ b/src/common/datasource/src/file_format/tests.rs
@@ -12,19 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
use std::vec;
use arrow_schema::SchemaRef;
use datafusion::assert_batches_eq;
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::object_store::ObjectStoreUrl;
-use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream};
+use datafusion::execution::context::TaskContext;
+use datafusion::physical_plan::file_format::{FileOpener, FileScanConfig, FileStream, ParquetExec};
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
+use datafusion::physical_plan::ExecutionPlan;
+use datafusion::prelude::SessionContext;
use futures::StreamExt;
use crate::compression::CompressionType;
use crate::file_format::csv::{CsvConfigBuilder, CsvOpener};
use crate::file_format::json::JsonOpener;
+use crate::file_format::parquet::DefaultParquetFileReaderFactory;
use crate::test_util::{self, test_basic_schema, test_store};
fn scan_config(file_schema: SchemaRef, limit: Option<usize>, filename: &str) -> FileScanConfig {
@@ -159,3 +164,43 @@ async fn test_csv_opener() {
test.run().await;
}
}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn test_parquet_exec() {
+ let store = test_store("/");
+
+ let schema = test_basic_schema();
+
+ let path = &test_util::get_data_dir("tests/parquet/basic.parquet")
+ .display()
+ .to_string();
+ let base_config = scan_config(schema.clone(), None, path);
+
+ let exec = ParquetExec::new(base_config, None, None)
+ .with_parquet_file_reader_factory(Arc::new(DefaultParquetFileReaderFactory::new(store)));
+
+ let ctx = SessionContext::new();
+
+ let context = Arc::new(TaskContext::from(&ctx));
+
+ // The stream batch size can be set by ctx.session_config.batch_size
+ let result = exec
+ .execute(0, context)
+ .unwrap()
+ .map(|b| b.unwrap())
+ .collect::<Vec<_>>()
+ .await;
+
+ assert_batches_eq!(
+ vec![
+ "+-----+-------+",
+ "| num | str |",
+ "+-----+-------+",
+ "| 5 | test |",
+ "| 2 | hello |",
+ "| 4 | foo |",
+ "+-----+-------+",
+ ],
+ &result
+ );
+}
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 5e74aa0cec69..0e8d9f91631a 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -16,7 +16,7 @@ pub use opendal::raw::normalize_path as raw_normalize_path;
pub use opendal::raw::oio::Pager;
pub use opendal::{
layers, services, Builder as ObjectStoreBuilder, Entry, EntryMode, Error, ErrorKind, Metakey,
- Operator as ObjectStore, Result, Writer,
+ Operator as ObjectStore, Reader, Result, Writer,
};
pub mod cache_policy;
|
feat
|
implement ParquetFileReaderFactory (#1423)
|
51b23664f762fb96ce6044475f4d304d5116e162
|
2023-05-30 06:29:14
|
LFC
|
feat: update table metadata in lock (#1634)
| false
|
diff --git a/src/meta-srv/src/lock.rs b/src/meta-srv/src/lock.rs
index 9923ed894502..b4d0a3eafe30 100644
--- a/src/meta-srv/src/lock.rs
+++ b/src/meta-srv/src/lock.rs
@@ -13,6 +13,8 @@
// limitations under the License.
pub mod etcd;
+pub(crate) mod keys;
+pub(crate) mod memory;
use std::sync::Arc;
@@ -28,6 +30,14 @@ pub struct Opts {
pub expire_secs: Option<u64>,
}
+impl Default for Opts {
+ fn default() -> Self {
+ Opts {
+ expire_secs: Some(DEFAULT_EXPIRE_TIME_SECS),
+ }
+ }
+}
+
#[async_trait::async_trait]
pub trait DistLock: Send + Sync {
// Lock acquires a distributed shared lock on a given named lock. On success, it
diff --git a/src/meta-srv/src/lock/keys.rs b/src/meta-srv/src/lock/keys.rs
new file mode 100644
index 000000000000..b1b87244bffd
--- /dev/null
+++ b/src/meta-srv/src/lock/keys.rs
@@ -0,0 +1,28 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! All keys used for distributed locking in the Metasrv.
+//! Place them in this unified module for better maintenance.
+
+use common_meta::RegionIdent;
+
+use crate::lock::Key;
+
+pub(crate) fn table_metadata_lock_key(region: &RegionIdent) -> Key {
+ format!(
+ "table_metadata_lock_({}-{}.{}.{}-{})",
+ region.cluster_id, region.catalog, region.schema, region.table, region.table_id,
+ )
+ .into_bytes()
+}
diff --git a/src/meta-srv/src/lock/memory.rs b/src/meta-srv/src/lock/memory.rs
new file mode 100644
index 000000000000..72637700f144
--- /dev/null
+++ b/src/meta-srv/src/lock/memory.rs
@@ -0,0 +1,112 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use dashmap::DashMap;
+use tokio::sync::{Mutex, OwnedMutexGuard};
+
+use crate::error::Result;
+use crate::lock::{DistLock, Key, Opts};
+
+#[derive(Default)]
+pub(crate) struct MemLock {
+ mutexes: DashMap<Key, Arc<Mutex<()>>>,
+ guards: DashMap<Key, OwnedMutexGuard<()>>,
+}
+
+#[async_trait]
+impl DistLock for MemLock {
+ async fn lock(&self, key: Vec<u8>, _opts: Opts) -> Result<Key> {
+ let mutex = self
+ .mutexes
+ .entry(key.clone())
+ .or_insert_with(|| Arc::new(Mutex::new(())))
+ .clone();
+
+ let guard = mutex.lock_owned().await;
+
+ self.guards.insert(key.clone(), guard);
+ Ok(key)
+ }
+
+ async fn unlock(&self, key: Vec<u8>) -> Result<()> {
+ // drop the guard so that the mutex can be unlocked,
+ // effectively letting the `mutex.lock_owned` in the `lock` method proceed
+ self.guards.remove(&key);
+ Ok(())
+ }
+}
+
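
A minimal, self-contained sketch of a per-key in-memory lock in plain std Rust. Unlike `MemLock` above, which stashes an owned guard so that `unlock(key)` can release it later, this version hands the per-key mutex back to the caller and relies on dropping the guard; the design is an illustrative assumption, not the meta-srv implementation.

use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct KeyedLock {
    mutexes: Mutex<HashMap<String, Arc<Mutex<()>>>>,
}

impl KeyedLock {
    /// Return the mutex guarding `key`, creating it on first use.
    fn for_key(&self, key: &str) -> Arc<Mutex<()>> {
        let mut map = self.mutexes.lock().unwrap();
        map.entry(key.to_string())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone()
    }
}

fn main() {
    let locks = Arc::new(KeyedLock::default());
    let counter = Arc::new(AtomicU32::new(0));

    let handles: Vec<_> = (0..8)
        .map(|_| {
            let locks = Arc::clone(&locks);
            let counter = Arc::clone(&counter);
            std::thread::spawn(move || {
                for _ in 0..100 {
                    let key_lock = locks.for_key("table_metadata_lock");
                    let _guard = key_lock.lock().unwrap();
                    // Deliberately non-atomic read-modify-write, as in the MemLock
                    // test: it is only correct because the lock serializes it.
                    let v = counter.load(Ordering::Relaxed);
                    counter.store(v + 1, Ordering::Relaxed);
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(counter.load(Ordering::Relaxed), 800);
}
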
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
+ use std::sync::atomic::{AtomicU32, Ordering};
+
+ use rand::seq::SliceRandom;
+
+ use super::*;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_mem_lock_concurrently() {
+ let lock = Arc::new(MemLock::default());
+
+ let keys = (0..10)
+ .map(|i| format!("my-lock-{i}").into_bytes())
+ .collect::<Vec<Key>>();
+ let counters: [(Key, AtomicU32); 10] = keys
+ .iter()
+ .map(|x| (x.clone(), AtomicU32::new(0)))
+ .collect::<Vec<_>>()
+ .try_into()
+ .unwrap();
+ let counters = Arc::new(HashMap::from(counters));
+
+ let tasks = (0..100)
+ .map(|_| {
+ let mut keys = keys.clone();
+ keys.shuffle(&mut rand::thread_rng());
+
+ let lock_clone = lock.clone();
+ let counters_clone = counters.clone();
+ tokio::spawn(async move {
+ // every key's counter is incremented by 1, 10 times per task
+ for i in 0..100 {
+ let key = &keys[i % keys.len()];
+ lock_clone
+ .lock(key.clone(), Opts { expire_secs: None })
+ .await
+ .unwrap();
+
+ // Intentionally create a critical section:
+ // if our MemLock is flawed, the resulting counter is wrong.
+ //
+ // Note that AtomicU32 is only used to enable updates from multiple tasks;
+ // it does not make any guarantee about the correctness of the result.
+
+ let counter = counters_clone.get(key).unwrap();
+ let v = counter.load(Ordering::Relaxed);
+ counter.store(v + 1, Ordering::Relaxed);
+
+ lock_clone.unlock(key.clone()).await.unwrap();
+ }
+ })
+ })
+ .collect::<Vec<_>>();
+ futures::future::join_all(tasks).await;
+
+ assert!(counters.values().all(|x| x.load(Ordering::Relaxed) == 1000));
+ }
+}
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 66635b27f8a9..64e4117aa6e2 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -120,7 +120,7 @@ pub struct MetaSrv {
handler_group: HeartbeatHandlerGroup,
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClient>,
- lock: Option<DistLockRef>,
+ lock: DistLockRef,
procedure_manager: ProcedureManagerRef,
metadata_service: MetadataServiceRef,
mailbox: MailboxRef,
@@ -244,8 +244,8 @@ impl MetaSrv {
}
#[inline]
- pub fn lock(&self) -> Option<DistLockRef> {
- self.lock.clone()
+ pub fn lock(&self) -> &DistLockRef {
+ &self.lock
}
#[inline]
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index ac41100545cf..5b89dca3e33d 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -25,6 +25,7 @@ use crate::handler::{
KeepLeaseHandler, OnLeaderStartHandler, PersistStatsHandler, Pushers, RegionFailureHandler,
ResponseHeaderHandler,
};
+use crate::lock::memory::MemLock;
use crate::lock::DistLockRef;
use crate::metadata_service::{DefaultMetadataService, MetadataServiceRef};
use crate::metasrv::{
@@ -140,6 +141,8 @@ impl MetaSrvBuilder {
let state_store = Arc::new(MetaStateStore::new(kv_store.clone()));
let procedure_manager = Arc::new(LocalManager::new(ManagerConfig::default(), state_store));
+ let lock = lock.unwrap_or_else(|| Arc::new(MemLock::default()));
+
let handler_group = match handler_group {
Some(handler_group) => handler_group,
None => {
@@ -154,6 +157,7 @@ impl MetaSrvBuilder {
catalog: None,
schema: None,
},
+ lock.clone(),
));
let region_failure_handler =
diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs
index 49fb17c9ecda..167ba1c9b3dd 100644
--- a/src/meta-srv/src/procedure/region_failover.rs
+++ b/src/meta-srv/src/procedure/region_failover.rs
@@ -38,6 +38,7 @@ use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use crate::error::{Error, RegisterProcedureLoaderSnafu, Result};
+use crate::lock::DistLockRef;
use crate::metasrv::{SelectorContext, SelectorRef};
use crate::service::mailbox::MailboxRef;
@@ -49,6 +50,7 @@ pub(crate) struct RegionFailoverManager {
procedure_manager: ProcedureManagerRef,
selector: SelectorRef,
selector_ctx: SelectorContext,
+ dist_lock: DistLockRef,
running_procedures: Arc<Mutex<HashSet<RegionIdent>>>,
}
@@ -72,33 +74,35 @@ impl RegionFailoverManager {
procedure_manager: ProcedureManagerRef,
selector: SelectorRef,
selector_ctx: SelectorContext,
+ dist_lock: DistLockRef,
) -> Self {
Self {
mailbox,
procedure_manager,
selector,
selector_ctx,
+ dist_lock,
running_procedures: Arc::new(Mutex::new(HashSet::new())),
}
}
+ fn create_context(&self) -> RegionFailoverContext {
+ RegionFailoverContext {
+ mailbox: self.mailbox.clone(),
+ selector: self.selector.clone(),
+ selector_ctx: self.selector_ctx.clone(),
+ dist_lock: self.dist_lock.clone(),
+ }
+ }
+
pub(crate) fn try_start(&self) -> Result<()> {
- let mailbox = self.mailbox.clone();
- let selector = self.selector.clone();
- let selector_ctx = self.selector_ctx.clone();
+ let context = self.create_context();
self.procedure_manager
.register_loader(
RegionFailoverProcedure::TYPE_NAME,
Box::new(move |json| {
- RegionFailoverProcedure::from_json(
- json,
- RegionFailoverContext {
- mailbox: mailbox.clone(),
- selector: selector.clone(),
- selector_ctx: selector_ctx.clone(),
- },
- )
- .map(|p| Box::new(p) as _)
+ let context = context.clone();
+ RegionFailoverProcedure::from_json(json, context).map(|p| Box::new(p) as _)
}),
)
.context(RegisterProcedureLoaderSnafu {
@@ -120,14 +124,8 @@ impl RegionFailoverManager {
return;
}
- let procedure = RegionFailoverProcedure::new(
- failed_region.clone(),
- RegionFailoverContext {
- mailbox: self.mailbox.clone(),
- selector: self.selector.clone(),
- selector_ctx: self.selector_ctx.clone(),
- },
- );
+ let context = self.create_context();
+ let procedure = RegionFailoverProcedure::new(failed_region.clone(), context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
let procedure_id = procedure_with_id.id;
info!("Starting region failover procedure {procedure_id} for region {failed_region:?}");
@@ -172,6 +170,7 @@ pub struct RegionFailoverContext {
pub mailbox: MailboxRef,
pub selector: SelectorRef,
pub selector_ctx: SelectorContext,
+ pub dist_lock: DistLockRef,
}
/// The state machine of region failover procedure. Driven by the call to `next`.
@@ -316,6 +315,7 @@ mod tests {
use super::*;
use crate::handler::{HeartbeatMailbox, Pusher, Pushers};
+ use crate::lock::memory::MemLock;
use crate::selector::{Namespace, Selector};
use crate::sequence::Sequence;
use crate::service::mailbox::Channel;
@@ -356,21 +356,54 @@ mod tests {
pub struct TestingEnv {
pub context: RegionFailoverContext,
- pub failed_region: RegionIdent,
pub heartbeat_receivers: HashMap<DatanodeId, Receiver<tonic::Result<HeartbeatResponse>>>,
}
+ impl TestingEnv {
+ pub async fn failed_region(&self, region_number: u32) -> RegionIdent {
+ let table = "my_table";
+ let key = TableGlobalKey {
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+ table_name: table.to_string(),
+ };
+ let value =
+ table_routes::get_table_global_value(&self.context.selector_ctx.kv_store, &key)
+ .await
+ .unwrap()
+ .unwrap();
+
+ let failed_datanode = value
+ .regions_id_map
+ .iter()
+ .find_map(|(&datanode_id, regions)| {
+ if regions.contains(®ion_number) {
+ Some(datanode_id)
+ } else {
+ None
+ }
+ })
+ .unwrap();
+ RegionIdent {
+ cluster_id: 0,
+ datanode_id: failed_datanode,
+ table_id: 1,
+ engine: MITO_ENGINE.to_string(),
+ region_number,
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ table: table.to_string(),
+ }
+ }
+ }
+
pub struct TestingEnvBuilder {
selector: Option<SelectorRef>,
- failed_region: Option<u32>,
}
impl TestingEnvBuilder {
pub fn new() -> Self {
- Self {
- selector: None,
- failed_region: None,
- }
+ Self { selector: None }
}
#[allow(unused)]
@@ -379,11 +412,6 @@ mod tests {
self
}
- pub fn with_failed_region(mut self, failed_region: u32) -> Self {
- self.failed_region = Some(failed_region);
- self
- }
-
pub async fn build(self) -> TestingEnv {
let kv_store = Arc::new(MemStore::new()) as _;
@@ -409,29 +437,6 @@ mod tests {
Sequence::new("test_heartbeat_mailbox", 0, 100, kv_store.clone());
let mailbox = HeartbeatMailbox::create(pushers, mailbox_sequence);
- let failed_region = self.failed_region.unwrap_or(1);
- let failed_datanode = table_global_value
- .regions_id_map
- .iter()
- .find_map(|(datanode_id, regions)| {
- if regions.contains(&failed_region) {
- Some(*datanode_id)
- } else {
- None
- }
- })
- .unwrap();
- let failed_region = RegionIdent {
- cluster_id: 0,
- datanode_id: failed_datanode,
- table_id: 1,
- engine: MITO_ENGINE.to_string(),
- region_number: failed_region,
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table: table.to_string(),
- };
-
let selector = self.selector.unwrap_or_else(|| {
let nodes = (1..=table_global_value.regions_id_map.len())
.map(|id| Peer {
@@ -454,8 +459,8 @@ mod tests {
mailbox,
selector,
selector_ctx,
+ dist_lock: Arc::new(MemLock::default()),
},
- failed_region,
heartbeat_receivers,
}
}
@@ -465,21 +470,19 @@ mod tests {
async fn test_region_failover_procedure() {
common_telemetry::init_default_ut_logging();
- let TestingEnv {
- context,
- failed_region,
- mut heartbeat_receivers,
- } = TestingEnvBuilder::new().build().await;
+ let mut env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let mut procedure = Box::new(RegionFailoverProcedure::new(
failed_region.clone(),
- context.clone(),
+ env.context.clone(),
)) as BoxedProcedure;
- let mut failed_datanode = heartbeat_receivers
+ let mut failed_datanode = env
+ .heartbeat_receivers
.remove(&failed_region.datanode_id)
.unwrap();
- let mailbox_clone = context.mailbox.clone();
+ let mailbox_clone = env.context.mailbox.clone();
let failed_region_clone = failed_region.clone();
common_runtime::spawn_bg(async move {
let resp = failed_datanode.recv().await.unwrap().unwrap();
@@ -516,8 +519,8 @@ mod tests {
});
let (candidate_tx, mut candidate_rx) = tokio::sync::mpsc::channel(1);
- for (datanode_id, mut recv) in heartbeat_receivers.into_iter() {
- let mailbox_clone = context.mailbox.clone();
+ for (datanode_id, mut recv) in env.heartbeat_receivers.into_iter() {
+ let mailbox_clone = env.context.mailbox.clone();
let failed_region_clone = failed_region.clone();
let candidate_tx = candidate_tx.clone();
common_runtime::spawn_bg(async move {
@@ -575,7 +578,7 @@ mod tests {
schema_name: failed_region.schema.clone(),
table_name: failed_region.table.clone(),
};
- let value = table_routes::get_table_global_value(&context.selector_ctx.kv_store, &key)
+ let value = table_routes::get_table_global_value(&env.context.selector_ctx.kv_store, &key)
.await
.unwrap()
.unwrap();
@@ -595,18 +598,18 @@ mod tests {
#[tokio::test]
async fn test_state_serde() {
- let TestingEnv {
- context,
- failed_region,
- heartbeat_receivers: _,
- } = TestingEnvBuilder::new().build().await;
+ let env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let state = RegionFailoverStart::new();
let node = Node {
failed_region,
state: Some(Box::new(state)),
};
- let procedure = RegionFailoverProcedure { node, context };
+ let procedure = RegionFailoverProcedure {
+ node,
+ context: env.context,
+ };
let s = procedure.dump().unwrap();
assert_eq!(
diff --git a/src/meta-srv/src/procedure/region_failover/activate_region.rs b/src/meta-srv/src/procedure/region_failover/activate_region.rs
index 761a174918f6..66b8bef58ae4 100644
--- a/src/meta-srv/src/procedure/region_failover/activate_region.rs
+++ b/src/meta-srv/src/procedure/region_failover/activate_region.rs
@@ -131,30 +131,27 @@ mod tests {
use api::v1::meta::mailbox_message::Payload;
use common_meta::instruction::SimpleReply;
- use super::super::tests::{TestingEnv, TestingEnvBuilder};
+ use super::super::tests::TestingEnvBuilder;
use super::*;
#[tokio::test]
async fn test_activate_region_success() {
common_telemetry::init_default_ut_logging();
- let TestingEnv {
- context,
- failed_region,
- mut heartbeat_receivers,
- } = TestingEnvBuilder::new().build().await;
+ let mut env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let candidate = 2;
let state = ActivateRegion::new(Peer::new(candidate, ""));
let mailbox_receiver = state
- .send_open_region_message(&context, &failed_region, Duration::from_millis(100))
+ .send_open_region_message(&env.context, &failed_region, Duration::from_millis(100))
.await
.unwrap();
let message_id = mailbox_receiver.message_id();
// verify that the open region message is sent
- let rx = heartbeat_receivers.get_mut(&candidate).unwrap();
+ let rx = env.heartbeat_receivers.get_mut(&candidate).unwrap();
let resp = rx.recv().await.unwrap().unwrap();
let received = &resp.mailbox_message.unwrap();
assert_eq!(received.id, message_id);
@@ -169,7 +166,7 @@ mod tests {
);
// simulating response from Datanode
- context
+ env.context
.mailbox
.on_recv(
message_id,
@@ -205,21 +202,18 @@ mod tests {
async fn test_activate_region_timeout() {
common_telemetry::init_default_ut_logging();
- let TestingEnv {
- context,
- failed_region,
- mut heartbeat_receivers,
- } = TestingEnvBuilder::new().build().await;
+ let mut env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let candidate = 2;
let state = ActivateRegion::new(Peer::new(candidate, ""));
let mailbox_receiver = state
- .send_open_region_message(&context, &failed_region, Duration::from_millis(100))
+ .send_open_region_message(&env.context, &failed_region, Duration::from_millis(100))
.await
.unwrap();
// verify that the open region message is sent
- let rx = heartbeat_receivers.get_mut(&candidate).unwrap();
+ let rx = env.heartbeat_receivers.get_mut(&candidate).unwrap();
let resp = rx.recv().await.unwrap().unwrap();
let received = &resp.mailbox_message.unwrap();
assert_eq!(received.id, mailbox_receiver.message_id());
diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
index 49b1095764d3..b24e188c05fa 100644
--- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
+++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
@@ -136,29 +136,27 @@ mod tests {
use api::v1::meta::mailbox_message::Payload;
use common_meta::instruction::SimpleReply;
- use super::super::tests::{TestingEnv, TestingEnvBuilder};
+ use super::super::tests::TestingEnvBuilder;
use super::*;
#[tokio::test]
async fn test_deactivate_region_success() {
common_telemetry::init_default_ut_logging();
- let TestingEnv {
- context,
- failed_region,
- mut heartbeat_receivers,
- } = TestingEnvBuilder::new().build().await;
+ let mut env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let state = DeactivateRegion::new(Peer::new(2, ""));
let mailbox_receiver = state
- .send_close_region_message(&context, &failed_region, Duration::from_millis(100))
+ .send_close_region_message(&env.context, &failed_region, Duration::from_millis(100))
.await
.unwrap();
let message_id = mailbox_receiver.message_id();
// verify that the close region message is sent
- let rx = heartbeat_receivers
+ let rx = env
+ .heartbeat_receivers
.get_mut(&failed_region.datanode_id)
.unwrap();
let resp = rx.recv().await.unwrap().unwrap();
@@ -175,7 +173,7 @@ mod tests {
);
// simulating response from Datanode
- context
+ env.context
.mailbox
.on_recv(
message_id,
@@ -211,20 +209,18 @@ mod tests {
async fn test_deactivate_region_timeout() {
common_telemetry::init_default_ut_logging();
- let TestingEnv {
- context,
- failed_region,
- mut heartbeat_receivers,
- } = TestingEnvBuilder::new().build().await;
+ let mut env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let state = DeactivateRegion::new(Peer::new(2, ""));
let mailbox_receiver = state
- .send_close_region_message(&context, &failed_region, Duration::from_millis(100))
+ .send_close_region_message(&env.context, &failed_region, Duration::from_millis(100))
.await
.unwrap();
// verify that the open region message is sent
- let rx = heartbeat_receivers
+ let rx = env
+ .heartbeat_receivers
.get_mut(&failed_region.datanode_id)
.unwrap();
let resp = rx.recv().await.unwrap().unwrap();
diff --git a/src/meta-srv/src/procedure/region_failover/failover_start.rs b/src/meta-srv/src/procedure/region_failover/failover_start.rs
index f9ecde0e298d..ca2c00d05cef 100644
--- a/src/meta-srv/src/procedure/region_failover/failover_start.rs
+++ b/src/meta-srv/src/procedure/region_failover/failover_start.rs
@@ -101,30 +101,27 @@ impl State for RegionFailoverStart {
#[cfg(test)]
mod tests {
- use super::super::tests::{TestingEnv, TestingEnvBuilder};
+ use super::super::tests::TestingEnvBuilder;
use super::*;
#[tokio::test]
async fn test_choose_failover_candidate() {
common_telemetry::init_default_ut_logging();
- let TestingEnv {
- context,
- failed_region,
- heartbeat_receivers: _,
- } = TestingEnvBuilder::new().build().await;
+ let env = TestingEnvBuilder::new().build().await;
+ let failed_region = env.failed_region(1).await;
let mut state = RegionFailoverStart::new();
assert!(state.failover_candidate.is_none());
let candidate = state
- .choose_candidate(&context, &failed_region)
+ .choose_candidate(&env.context, &failed_region)
.await
.unwrap();
assert_ne!(candidate.id, failed_region.datanode_id);
let candidate_again = state
- .choose_candidate(&context, &failed_region)
+ .choose_candidate(&env.context, &failed_region)
.await
.unwrap();
assert_eq!(candidate, candidate_again);
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index 42dce8b427ad..731f7df18dc6 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -29,6 +29,8 @@ use crate::error::{
TableRouteConversionSnafu,
};
use crate::keys::TableRouteKey;
+use crate::lock::keys::table_metadata_lock_key;
+use crate::lock::Opts;
use crate::table_routes;
#[derive(Serialize, Deserialize, Debug)]
@@ -41,13 +43,29 @@ impl UpdateRegionMetadata {
Self { candidate }
}
- async fn update_meta(
+ // TODO(LFC): Update the two table metadata values in a batch atomically.
+ //
+ // Though the updating of the two metadata values is guarded by a distributed lock,
+ // it is not robust enough. For example, the lock lease could expire in the middle of
+ // one procedure's update, letting others start updating concurrently. For now, we set the lease of
+ // the distributed lock to 10 seconds, which is long enough here to get the job done.
+ //
+ // Maybe we should introduce "version" companion values to these two metadata values, and
+ // use an ETCD transaction request to update them?
+
+ /// Updates the metadata of the table. Specifically, the [TableGlobalValue] and [TableRouteValue].
+ async fn update_metadata(
&self,
ctx: &RegionFailoverContext,
failed_region: &RegionIdent,
) -> Result<()> {
+ let key = table_metadata_lock_key(failed_region);
+ let key = ctx.dist_lock.lock(key, Opts::default()).await?;
+
self.update_table_global_value(ctx, failed_region).await?;
self.update_table_route(ctx, failed_region).await?;
+
+ ctx.dist_lock.unlock(key).await?;
Ok(())
}
@@ -183,15 +201,17 @@ impl State for UpdateRegionMetadata {
ctx: &RegionFailoverContext,
failed_region: &RegionIdent,
) -> Result<Box<dyn State>> {
- self.update_meta(ctx, failed_region).await.map_err(|e| {
- RetryLaterSnafu {
- reason: format!(
- "Failed to update metadata for failed region: {}, error: {}",
- failed_region, e
- ),
- }
- .build()
- })?;
+ self.update_metadata(ctx, failed_region)
+ .await
+ .map_err(|e| {
+ RetryLaterSnafu {
+ reason: format!(
+ "Failed to update metadata for failed region: {}, error: {}",
+ failed_region, e
+ ),
+ }
+ .build()
+ })?;
Ok(Box::new(RegionFailoverEnd))
}
}
@@ -209,12 +229,8 @@ mod tests {
async fn test_update_table_global_value() {
common_telemetry::init_default_ut_logging();
- async fn test(env: TestingEnv, candidate: u64) -> TableGlobalValue {
- let TestingEnv {
- context,
- failed_region,
- heartbeat_receivers: _,
- } = env;
+ async fn test(env: TestingEnv, failed_region: u32, candidate: u64) -> TableGlobalValue {
+ let failed_region = env.failed_region(failed_region).await;
let key = TableGlobalKey {
catalog_name: failed_region.catalog.clone(),
@@ -223,19 +239,19 @@ mod tests {
};
let original =
- table_routes::get_table_global_value(&context.selector_ctx.kv_store, &key)
+ table_routes::get_table_global_value(&env.context.selector_ctx.kv_store, &key)
.await
.unwrap()
.unwrap();
let state = UpdateRegionMetadata::new(Peer::new(candidate, ""));
state
- .update_table_global_value(&context, &failed_region)
+ .update_table_global_value(&env.context, &failed_region)
.await
.unwrap();
let updated =
- table_routes::get_table_global_value(&context.selector_ctx.kv_store, &key)
+ table_routes::get_table_global_value(&env.context.selector_ctx.kv_store, &key)
.await
.unwrap()
.unwrap();
@@ -253,8 +269,8 @@ mod tests {
// 3 => 4
// Testing failed region 1 moves to Datanode 2.
- let env = TestingEnvBuilder::new().with_failed_region(1).build().await;
- let updated = test(env, 2).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 1, 2).await;
let new_region_id_map = updated.regions_id_map;
assert_eq!(new_region_id_map.len(), 3);
@@ -263,8 +279,8 @@ mod tests {
assert_eq!(new_region_id_map.get(&3), Some(&vec![4]));
// Testing failed region 3 moves to Datanode 3.
- let env = TestingEnvBuilder::new().with_failed_region(3).build().await;
- let updated = test(env, 3).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 3, 3).await;
let new_region_id_map = updated.regions_id_map;
assert_eq!(new_region_id_map.len(), 2);
@@ -272,8 +288,8 @@ mod tests {
assert_eq!(new_region_id_map.get(&3), Some(&vec![4, 3]));
// Testing failed region 1 moves to a new Datanode, 4.
- let env = TestingEnvBuilder::new().with_failed_region(1).build().await;
- let updated = test(env, 4).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 1, 4).await;
let new_region_id_map = updated.regions_id_map;
assert_eq!(new_region_id_map.len(), 4);
@@ -283,8 +299,8 @@ mod tests {
assert_eq!(new_region_id_map.get(&4), Some(&vec![1]));
// Testing failed region 3 moves to a new Datanode, 4.
- let env = TestingEnvBuilder::new().with_failed_region(3).build().await;
- let updated = test(env, 4).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 3, 4).await;
let new_region_id_map = updated.regions_id_map;
assert_eq!(new_region_id_map.len(), 3);
@@ -297,16 +313,12 @@ mod tests {
async fn test_update_table_route() {
common_telemetry::init_default_ut_logging();
- async fn test(env: TestingEnv, candidate: u64) -> TableRouteValue {
- let TestingEnv {
- context,
- failed_region,
- heartbeat_receivers: _,
- } = env;
+ async fn test(env: TestingEnv, failed_region: u32, candidate: u64) -> TableRouteValue {
+ let failed_region = env.failed_region(failed_region).await;
let state = UpdateRegionMetadata::new(Peer::new(candidate, ""));
state
- .update_table_route(&context, &failed_region)
+ .update_table_route(&env.context, &failed_region)
.await
.unwrap();
@@ -316,7 +328,7 @@ mod tests {
schema_name: &failed_region.schema,
table_name: &failed_region.table,
};
- table_routes::get_table_route_value(&context.selector_ctx.kv_store, &key)
+ table_routes::get_table_route_value(&env.context.selector_ctx.kv_store, &key)
.await
.unwrap()
}
@@ -329,8 +341,8 @@ mod tests {
// 4 => 3
// Testing failed region 1 moves to Datanode 2.
- let env = TestingEnvBuilder::new().with_failed_region(1).build().await;
- let updated = test(env, 2).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 1, 2).await;
let actual = &updated.table_route.as_ref().unwrap().region_routes;
// Expected region routes:
@@ -341,17 +353,17 @@ mod tests {
// 4 => 3
let peers = &updated.peers;
assert_eq!(peers.len(), 3);
- let expected = vec![
+ let expected = &vec![
new_region_route(1, peers, 2),
new_region_route(2, peers, 1),
new_region_route(3, peers, 2),
new_region_route(4, peers, 3),
];
- assert_eq!(actual, &expected);
+ assert_eq!(actual, expected);
// Testing failed region 3 moves to Datanode 3.
- let env = TestingEnvBuilder::new().with_failed_region(3).build().await;
- let updated = test(env, 3).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 3, 3).await;
let actual = &updated.table_route.as_ref().unwrap().region_routes;
// Expected region routes:
@@ -362,17 +374,17 @@ mod tests {
// 4 => 3
let peers = &updated.peers;
assert_eq!(peers.len(), 2);
- let expected = vec![
+ let expected = &vec![
new_region_route(1, peers, 1),
new_region_route(2, peers, 1),
new_region_route(3, peers, 3),
new_region_route(4, peers, 3),
];
- assert_eq!(actual, &expected);
+ assert_eq!(actual, expected);
// Testing failed region 1 moves to a new Datanode, 4.
- let env = TestingEnvBuilder::new().with_failed_region(1).build().await;
- let updated = test(env, 4).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 1, 4).await;
let actual = &updated.table_route.as_ref().unwrap().region_routes;
// Expected region routes:
@@ -383,17 +395,17 @@ mod tests {
// 4 => 3
let peers = &updated.peers;
assert_eq!(peers.len(), 4);
- let expected = vec![
+ let expected = &vec![
new_region_route(1, peers, 4),
new_region_route(2, peers, 1),
new_region_route(3, peers, 2),
new_region_route(4, peers, 3),
];
- assert_eq!(actual, &expected);
+ assert_eq!(actual, expected);
// Testing failed region 3 moves to a new Datanode, 4.
- let env = TestingEnvBuilder::new().with_failed_region(3).build().await;
- let updated = test(env, 4).await;
+ let env = TestingEnvBuilder::new().build().await;
+ let updated = test(env, 3, 4).await;
let actual = &updated.table_route.as_ref().unwrap().region_routes;
// Expected region routes:
@@ -404,12 +416,112 @@ mod tests {
// 4 => 3
let peers = &updated.peers;
assert_eq!(peers.len(), 3);
- let expected = vec![
+ let expected = &vec![
new_region_route(1, peers, 1),
new_region_route(2, peers, 1),
new_region_route(3, peers, 4),
new_region_route(4, peers, 3),
];
- assert_eq!(actual, &expected);
+ assert_eq!(actual, expected);
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_update_metadata_concurrently() {
+ common_telemetry::init_default_ut_logging();
+
+ // Test the correctness of concurrently updating the region distribution in table global
+ // value, and region routes in table route value. Region 1 moves to Datanode 2; region 2
+ // moves to Datanode 3.
+ //
+ // Datanode => Regions
+ // Before: | After:
+ // 1 => 1, 2 |
+ // 2 => 3 | 2 => 3, 1
+ // 3 => 4 | 3 => 4, 2
+ //
+ // region number => leader node
+ // Before: | After:
+ // 1 => 1 | 1 => 2
+ // 2 => 1 | 2 => 3
+ // 3 => 2 | 3 => 2
+ // 4 => 3 | 4 => 3
+ //
+ // The test case runs 10 times to increase the likelihood of concurrent updates.
+ for _ in 0..10 {
+ let env = TestingEnvBuilder::new().build().await;
+
+ let ctx_1 = env.context.clone();
+ let ctx_2 = env.context.clone();
+
+ let failed_region_1 = env.failed_region(1).await;
+ let failed_region_2 = env.failed_region(2).await;
+
+ let catalog_name = failed_region_1.catalog.clone();
+ let schema_name = failed_region_1.schema.clone();
+ let table_name = failed_region_1.table.clone();
+ let table_id = failed_region_1.table_id as u64;
+
+ futures::future::join_all(vec![
+ tokio::spawn(async move {
+ let state = UpdateRegionMetadata::new(Peer::new(2, ""));
+ state
+ .update_metadata(&ctx_1, &failed_region_1)
+ .await
+ .unwrap();
+ }),
+ tokio::spawn(async move {
+ let state = UpdateRegionMetadata::new(Peer::new(3, ""));
+ state
+ .update_metadata(&ctx_2, &failed_region_2)
+ .await
+ .unwrap();
+ }),
+ ])
+ .await;
+
+ let table_route_key = TableRouteKey {
+ table_id,
+ catalog_name: &catalog_name,
+ schema_name: &schema_name,
+ table_name: &table_name,
+ };
+ let table_route_value = table_routes::get_table_route_value(
+ &env.context.selector_ctx.kv_store,
+ &table_route_key,
+ )
+ .await
+ .unwrap();
+ let peers = &table_route_value.peers;
+ let actual = &table_route_value
+ .table_route
+ .as_ref()
+ .unwrap()
+ .region_routes;
+ let expected = &vec![
+ new_region_route(1, peers, 2),
+ new_region_route(2, peers, 3),
+ new_region_route(3, peers, 2),
+ new_region_route(4, peers, 3),
+ ];
+ assert_eq!(peers.len(), 2);
+ assert_eq!(actual, expected);
+
+ let table_global_key = TableGlobalKey {
+ catalog_name,
+ schema_name,
+ table_name,
+ };
+ let table_global_value = table_routes::get_table_global_value(
+ &env.context.selector_ctx.kv_store,
+ &table_global_key,
+ )
+ .await
+ .unwrap()
+ .unwrap();
+ let map = table_global_value.regions_id_map;
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.get(&2), Some(&vec![3, 1]));
+ assert_eq!(map.get(&3), Some(&vec![4, 2]));
+ }
}
}
diff --git a/src/meta-srv/src/service/lock.rs b/src/meta-srv/src/service/lock.rs
index 3be36152cfc9..81f218027e07 100644
--- a/src/meta-srv/src/service/lock.rs
+++ b/src/meta-srv/src/service/lock.rs
@@ -13,11 +13,9 @@
// limitations under the License.
use api::v1::meta::{lock_server, LockRequest, LockResponse, UnlockRequest, UnlockResponse};
-use snafu::OptionExt;
use tonic::{Request, Response};
use super::GrpcResult;
-use crate::error;
use crate::lock::Opts;
use crate::metasrv::MetaSrv;
@@ -29,8 +27,7 @@ impl lock_server::Lock for MetaSrv {
} = request.into_inner();
let expire_secs = Some(expire_secs as u64);
- let lock = self.lock().context(error::LockNotConfigSnafu)?;
- let key = lock.lock(name, Opts { expire_secs }).await?;
+ let key = self.lock().lock(name, Opts { expire_secs }).await?;
let resp = LockResponse {
key,
@@ -43,8 +40,7 @@ impl lock_server::Lock for MetaSrv {
async fn unlock(&self, request: Request<UnlockRequest>) -> GrpcResult<UnlockResponse> {
let UnlockRequest { key, .. } = request.into_inner();
- let lock = self.lock().context(error::LockNotConfigSnafu)?;
- let _ = lock.unlock(key).await?;
+ let _ = self.lock().unlock(key).await?;
let resp = UnlockResponse {
..Default::default()
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index c3a3a575d265..06737e3528c8 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use common_procedure::local::{LocalManager, ManagerConfig};
use crate::handler::{HeartbeatMailbox, Pushers};
+use crate::lock::memory::MemLock;
use crate::metasrv::SelectorContext;
use crate::procedure::region_failover::RegionFailoverManager;
use crate::procedure::state_store::MetaStateStore;
@@ -48,5 +49,6 @@ pub(crate) fn create_region_failover_manager() -> Arc<RegionFailoverManager> {
procedure_manager,
selector,
selector_ctx,
+ Arc::new(MemLock::default()),
))
}
diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index 706fdfd1a629..83af81f2c22c 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -134,6 +134,7 @@ async fn run_region_failover_procedure(cluster: &GreptimeDbCluster, failed_regio
catalog: None,
schema: None,
},
+ dist_lock: meta_srv.lock().clone(),
},
);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
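A minimal, self-contained sketch (not the meta-srv API) of the critical-section pattern this commit introduces: both metadata updates run inside one lock guard, so a concurrent failover procedure cannot interleave between them. Here tokio::sync::Mutex stands in for the distributed lock and the two update functions are placeholders; only the shape of update_metadata mirrors the patch above.

use std::sync::Arc;

use tokio::sync::Mutex;

async fn update_table_global_value(log: &mut Vec<&'static str>) {
    log.push("table_global_value");
}

async fn update_table_route(log: &mut Vec<&'static str>) {
    log.push("table_route_value");
}

async fn update_metadata(lock: Arc<Mutex<Vec<&'static str>>>) {
    // Plays the role of `ctx.dist_lock.lock(key, Opts::default()).await?`.
    let mut guard = lock.lock().await;
    update_table_global_value(&mut guard).await;
    update_table_route(&mut guard).await;
    // Dropping the guard here corresponds to `ctx.dist_lock.unlock(key).await?`.
}

#[tokio::main]
async fn main() {
    let lock = Arc::new(Mutex::new(Vec::new()));
    let tasks: Vec<_> = (0..2)
        .map(|_| tokio::spawn(update_metadata(lock.clone())))
        .collect();
    for task in tasks {
        task.await.unwrap();
    }
    // Each task's two updates always show up as an adjacent pair in the log.
    let log = lock.lock().await;
    println!("{:?}", *log);
}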
|
feat
|
update table metadata in lock (#1634)
|
fbf1ddd006320cff3da19e6fbf53b0ffc4f75031
|
2023-05-08 08:04:30
|
gitccl
|
feat: open catalogs and schemas in parallel (#1527)
| false
|
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index fe5d75a6b5a3..36eb5a2a82ad 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -29,7 +29,6 @@ use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::{EngineContext, TableReference};
-use table::metadata::TableId;
use table::requests::{CreateTableRequest, OpenTableRequest};
use table::TableRef;
use tokio::sync::Mutex;
@@ -80,16 +79,6 @@ impl RemoteCatalogManager {
}) as _
}
- fn new_schema_provider(&self, catalog_name: &str, schema_name: &str) -> SchemaProviderRef {
- Arc::new(RemoteSchemaProvider {
- catalog_name: catalog_name.to_string(),
- schema_name: schema_name.to_string(),
- node_id: self.node_id,
- backend: self.backend.clone(),
- engine_manager: self.engine_manager.clone(),
- }) as _
- }
-
async fn iter_remote_catalogs(
&self,
) -> Pin<Box<dyn Stream<Item = Result<CatalogKey>> + Send + '_>> {
@@ -114,178 +103,45 @@ impl RemoteCatalogManager {
}))
}
- async fn iter_remote_schemas(
- &self,
- catalog_name: &str,
- ) -> Pin<Box<dyn Stream<Item = Result<SchemaKey>> + Send + '_>> {
- let schema_prefix = build_schema_prefix(catalog_name);
- let mut schemas = self.backend.range(schema_prefix.as_bytes());
-
- Box::pin(stream!({
- while let Some(r) = schemas.next().await {
- let Kv(k, _) = r?;
- if !k.starts_with(schema_prefix.as_bytes()) {
- debug!("Ignoring non-schema key: {}", String::from_utf8_lossy(&k));
- continue;
- }
-
- let schema_key = SchemaKey::parse(&String::from_utf8_lossy(&k))
- .context(InvalidCatalogValueSnafu)?;
- yield Ok(schema_key)
- }
- }))
- }
-
- /// Iterate over all table entries on metasrv
- async fn iter_remote_tables(
- &self,
- catalog_name: &str,
- schema_name: &str,
- ) -> Pin<Box<dyn Stream<Item = Result<(TableGlobalKey, TableGlobalValue)>> + Send + '_>> {
- let table_prefix = build_table_global_prefix(catalog_name, schema_name);
- let mut tables = self.backend.range(table_prefix.as_bytes());
- Box::pin(stream!({
- while let Some(r) = tables.next().await {
- let Kv(k, v) = r?;
- if !k.starts_with(table_prefix.as_bytes()) {
- debug!("Ignoring non-table prefix: {}", String::from_utf8_lossy(&k));
- continue;
- }
- let table_key = TableGlobalKey::parse(&String::from_utf8_lossy(&k))
- .context(InvalidCatalogValueSnafu)?;
- let table_value =
- TableGlobalValue::from_bytes(&v).context(InvalidCatalogValueSnafu)?;
-
- info!(
- "Found catalog table entry, key: {}, value: {:?}",
- table_key, table_value
- );
- // metasrv has allocated region ids to current datanode
- if table_value
- .regions_id_map
- .get(&self.node_id)
- .map(|v| !v.is_empty())
- .unwrap_or(false)
- {
- yield Ok((table_key, table_value))
- }
- }
- }))
- }
-
/// Fetch catalogs/schemas/tables from remote catalog manager along with max table id allocated.
async fn initiate_catalogs(&self) -> Result<HashMap<String, CatalogProviderRef>> {
let mut res = HashMap::new();
let mut catalogs = self.iter_remote_catalogs().await;
+ let mut joins = Vec::new();
while let Some(r) = catalogs.next().await {
- let mut max_table_id = MAX_SYS_TABLE_ID;
let CatalogKey { catalog_name, .. } = r?;
info!("Fetch catalog from metasrv: {}", catalog_name);
let catalog = res
.entry(catalog_name.clone())
.or_insert_with(|| self.new_catalog_provider(&catalog_name))
.clone();
- increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
- self.initiate_schemas(&catalog_name, catalog, &mut max_table_id)
- .await?;
-
- info!(
- "Catalog name: {}, max table id allocated: {}",
- &catalog_name, max_table_id
- );
- }
+ let node_id = self.node_id;
+ let backend = self.backend.clone();
+ let engine_manager = self.engine_manager.clone();
- Ok(res)
- }
+ increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
- async fn initiate_schemas(
- &self,
- catalog_name: &str,
- catalog: CatalogProviderRef,
- max_table_id: &mut TableId,
- ) -> Result<()> {
- let mut schemas = self.iter_remote_schemas(catalog_name).await;
- while let Some(r) = schemas.next().await {
- let SchemaKey {
- catalog_name,
- schema_name,
- ..
- } = r?;
- info!("Found schema: {}.{}", catalog_name, schema_name);
- let schema = match catalog.schema(&schema_name).await? {
- None => {
- let schema = self.new_schema_provider(&catalog_name, &schema_name);
- catalog
- .register_schema(schema_name.clone(), schema.clone())
+ joins.push(common_runtime::spawn_bg(async move {
+ let max_table_id =
+ initiate_schemas(node_id, backend, engine_manager, &catalog_name, catalog)
.await?;
- info!("Registered schema: {}", &schema_name);
- schema
- }
- Some(schema) => schema,
- };
-
- info!(
- "Fetch schema from metasrv: {}.{}",
- &catalog_name, &schema_name
- );
- increment_gauge!(
- crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT,
- 1.0,
- &[crate::metrics::db_label(&catalog_name, &schema_name)],
- );
- self.initiate_tables(&catalog_name, &schema_name, schema, max_table_id)
- .await?;
+ info!(
+ "Catalog name: {}, max table id allocated: {}",
+ &catalog_name, max_table_id
+ );
+ Ok(())
+ }));
}
- Ok(())
- }
- /// Initiates all tables inside a catalog by fetching data from metasrv.
- async fn initiate_tables<'a>(
- &'a self,
- catalog_name: &'a str,
- schema_name: &'a str,
- schema: SchemaProviderRef,
- max_table_id: &mut TableId,
- ) -> Result<()> {
- info!("initializing tables in {}.{}", catalog_name, schema_name);
- let mut table_num = 0;
- let tables = self.iter_remote_tables(catalog_name, schema_name).await;
-
- let kvs = tables.try_collect::<Vec<_>>().await?;
- let node_id = self.node_id;
- let joins = kvs
+ futures::future::try_join_all(joins)
+ .await
+ .context(ParallelOpenTableSnafu)?
.into_iter()
- .map(|(table_key, table_value)| {
- let engine_manager = self.engine_manager.clone();
- common_runtime::spawn_bg(async move {
- open_or_create_table(node_id, engine_manager, &table_key, &table_value).await
- })
- })
- .collect::<Vec<_>>();
- let vec = futures::future::join_all(joins).await;
- for res in vec {
- let table_ref = res.context(ParallelOpenTableSnafu)??;
- let table_info = table_ref.table_info();
- let table_name = &table_info.name;
- let table_id = table_info.ident.table_id;
- schema.register_table(table_name.clone(), table_ref).await?;
- info!("Registered table {}", table_name);
- *max_table_id = (*max_table_id).max(table_id);
- table_num += 1;
- }
+ .collect::<Result<Vec<_>>>()?;
- increment_gauge!(
- crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
- 1.0,
- &[crate::metrics::db_label(catalog_name, schema_name)],
- );
- info!(
- "initialized tables in {}.{}, total: {}",
- catalog_name, schema_name, table_num
- );
- Ok(())
+ Ok(res)
}
pub async fn create_catalog_and_schema(
@@ -293,7 +149,13 @@ impl RemoteCatalogManager {
catalog_name: &str,
schema_name: &str,
) -> Result<CatalogProviderRef> {
- let schema_provider = self.new_schema_provider(catalog_name, schema_name);
+ let schema_provider = new_schema_provider(
+ self.node_id,
+ self.backend.clone(),
+ self.engine_manager.clone(),
+ catalog_name,
+ schema_name,
+ );
let catalog_provider = self.new_catalog_provider(catalog_name);
catalog_provider
@@ -332,6 +194,213 @@ impl RemoteCatalogManager {
}
}
+fn new_schema_provider(
+ node_id: u64,
+ backend: KvBackendRef,
+ engine_manager: TableEngineManagerRef,
+ catalog_name: &str,
+ schema_name: &str,
+) -> SchemaProviderRef {
+ Arc::new(RemoteSchemaProvider {
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ node_id,
+ backend,
+ engine_manager,
+ }) as _
+}
+
+async fn iter_remote_schemas<'a>(
+ backend: &'a KvBackendRef,
+ catalog_name: &'a str,
+) -> Pin<Box<dyn Stream<Item = Result<SchemaKey>> + Send + 'a>> {
+ let schema_prefix = build_schema_prefix(catalog_name);
+ let mut schemas = backend.range(schema_prefix.as_bytes());
+
+ Box::pin(stream!({
+ while let Some(r) = schemas.next().await {
+ let Kv(k, _) = r?;
+ if !k.starts_with(schema_prefix.as_bytes()) {
+ debug!("Ignoring non-schema key: {}", String::from_utf8_lossy(&k));
+ continue;
+ }
+
+ let schema_key =
+ SchemaKey::parse(&String::from_utf8_lossy(&k)).context(InvalidCatalogValueSnafu)?;
+ yield Ok(schema_key)
+ }
+ }))
+}
+
+/// Initiates all schemas inside the catalog by fetching data from metasrv.
+/// Returns the maximum table id in the catalog.
+async fn initiate_schemas(
+ node_id: u64,
+ backend: KvBackendRef,
+ engine_manager: TableEngineManagerRef,
+ catalog_name: &str,
+ catalog: CatalogProviderRef,
+) -> Result<u32> {
+ let mut schemas = iter_remote_schemas(&backend, catalog_name).await;
+ let mut joins = Vec::new();
+ while let Some(r) = schemas.next().await {
+ let SchemaKey {
+ catalog_name,
+ schema_name,
+ ..
+ } = r?;
+
+ info!("Found schema: {}.{}", catalog_name, schema_name);
+ let schema = match catalog.schema(&schema_name).await? {
+ None => {
+ let schema = new_schema_provider(
+ node_id,
+ backend.clone(),
+ engine_manager.clone(),
+ &catalog_name,
+ &schema_name,
+ );
+ catalog
+ .register_schema(schema_name.clone(), schema.clone())
+ .await?;
+ info!("Registered schema: {}", &schema_name);
+ schema
+ }
+ Some(schema) => schema,
+ };
+
+ info!(
+ "Fetch schema from metasrv: {}.{}",
+ &catalog_name, &schema_name
+ );
+ increment_gauge!(
+ crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT,
+ 1.0,
+ &[crate::metrics::db_label(&catalog_name, &schema_name)],
+ );
+
+ let backend = backend.clone();
+ let engine_manager = engine_manager.clone();
+
+ joins.push(common_runtime::spawn_bg(async move {
+ initiate_tables(
+ node_id,
+ backend,
+ engine_manager,
+ &catalog_name,
+ &schema_name,
+ schema,
+ )
+ .await
+ }));
+ }
+
+ let mut max_table_id = MAX_SYS_TABLE_ID;
+ if let Some(found_max_table_id) = futures::future::try_join_all(joins)
+ .await
+ .context(ParallelOpenTableSnafu)?
+ .into_iter()
+ .collect::<Result<Vec<_>>>()?
+ .into_iter()
+ .max()
+ {
+ max_table_id = max_table_id.max(found_max_table_id);
+ }
+
+ Ok(max_table_id)
+}
+
+/// Iterate over all table entries on metasrv
+async fn iter_remote_tables<'a>(
+ node_id: u64,
+ backend: &'a KvBackendRef,
+ catalog_name: &'a str,
+ schema_name: &'a str,
+) -> Pin<Box<dyn Stream<Item = Result<(TableGlobalKey, TableGlobalValue)>> + Send + 'a>> {
+ let table_prefix = build_table_global_prefix(catalog_name, schema_name);
+ let mut tables = backend.range(table_prefix.as_bytes());
+ Box::pin(stream!({
+ while let Some(r) = tables.next().await {
+ let Kv(k, v) = r?;
+ if !k.starts_with(table_prefix.as_bytes()) {
+ debug!("Ignoring non-table prefix: {}", String::from_utf8_lossy(&k));
+ continue;
+ }
+ let table_key = TableGlobalKey::parse(&String::from_utf8_lossy(&k))
+ .context(InvalidCatalogValueSnafu)?;
+ let table_value = TableGlobalValue::from_bytes(&v).context(InvalidCatalogValueSnafu)?;
+
+ info!(
+ "Found catalog table entry, key: {}, value: {:?}",
+ table_key, table_value
+ );
+ // metasrv has allocated region ids to current datanode
+ if table_value
+ .regions_id_map
+ .get(&node_id)
+ .map(|v| !v.is_empty())
+ .unwrap_or(false)
+ {
+ yield Ok((table_key, table_value))
+ }
+ }
+ }))
+}
+
+/// Initiates all tables inside the schema by fetching data from metasrv.
+/// Returns the maximum table id in the schema.
+async fn initiate_tables(
+ node_id: u64,
+ backend: KvBackendRef,
+ engine_manager: TableEngineManagerRef,
+ catalog_name: &str,
+ schema_name: &str,
+ schema: SchemaProviderRef,
+) -> Result<u32> {
+ info!("initializing tables in {}.{}", catalog_name, schema_name);
+ let tables = iter_remote_tables(node_id, &backend, catalog_name, schema_name).await;
+
+ let kvs = tables.try_collect::<Vec<_>>().await?;
+ let table_num = kvs.len();
+ let joins = kvs
+ .into_iter()
+ .map(|(table_key, table_value)| {
+ let engine_manager = engine_manager.clone();
+ let schema = schema.clone();
+ common_runtime::spawn_bg(async move {
+ let table_ref =
+ open_or_create_table(node_id, engine_manager, &table_key, &table_value).await?;
+ let table_info = table_ref.table_info();
+ let table_name = &table_info.name;
+ schema.register_table(table_name.clone(), table_ref).await?;
+ info!("Registered table {}", table_name);
+ Ok(table_info.ident.table_id)
+ })
+ })
+ .collect::<Vec<_>>();
+
+ let max_table_id = futures::future::try_join_all(joins)
+ .await
+ .context(ParallelOpenTableSnafu)?
+ .into_iter()
+ .collect::<Result<Vec<_>>>()?
+ .into_iter()
+ .max()
+ .unwrap_or(MAX_SYS_TABLE_ID);
+
+ increment_gauge!(
+ crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
+ 1.0,
+ &[crate::metrics::db_label(catalog_name, schema_name)],
+ );
+ info!(
+ "initialized tables in {}.{}, total: {}",
+ catalog_name, schema_name, table_num
+ );
+
+ Ok(max_table_id)
+}
+
async fn open_or_create_table(
node_id: u64,
engine_manager: TableEngineManagerRef,
@@ -507,7 +576,13 @@ impl CatalogManager for RemoteCatalogManager {
.context(CatalogNotFoundSnafu {
catalog_name: &catalog_name,
})?;
- let schema_provider = self.new_schema_provider(&catalog_name, &schema_name);
+ let schema_provider = new_schema_provider(
+ self.node_id,
+ self.backend.clone(),
+ self.engine_manager.clone(),
+ &catalog_name,
+ &schema_name,
+ );
catalog_provider
.register_schema(schema_name, schema_provider)
.await?;
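A minimal, self-contained sketch of the fan-out pattern the new initiate_catalogs/initiate_schemas code uses: spawn one background task per item, wait on all of them with futures::future::try_join_all, surface the first error, and fold the per-schema maxima into one table id. The schema names and the per-schema loader below are stand-ins rather than the real catalog manager API, and plain tokio::spawn stands in for common_runtime::spawn_bg.

use futures::future::try_join_all;

async fn max_table_id_in_schema(schema: String) -> Result<u32, String> {
    // Placeholder for "open every table in this schema and track the largest table id".
    Ok(schema.len() as u32)
}

async fn initiate_schemas_in_parallel(schemas: Vec<String>) -> Result<u32, String> {
    let joins: Vec<_> = schemas
        .into_iter()
        .map(|schema| tokio::spawn(max_table_id_in_schema(schema)))
        .collect();

    let max_table_id = try_join_all(joins)
        .await
        .map_err(|e| e.to_string())? // a join error means a task panicked
        .into_iter()
        .collect::<Result<Vec<_>, _>>()? // surface the first per-schema error
        .into_iter()
        .max()
        .unwrap_or(0); // fall back when there are no schemas at all

    Ok(max_table_id)
}

#[tokio::main]
async fn main() {
    let max = initiate_schemas_in_parallel(vec!["public".into(), "information_schema".into()])
        .await
        .unwrap();
    println!("max table id: {max}");
}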
|
feat
|
open catalogs and schemas in parallel (#1527)
|
ef75e8f7c33bc15708e522b096cc53cd00d92332
|
2023-08-28 17:37:52
|
LFC
|
feat: create distributed Mito2 table (#2246)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 1a3c89a18683..b8947d5c2b16 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -767,7 +767,7 @@ version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdca6a10ecad987bda04e95606ef85a5417dcaac1a78455242d72e031e2b6b62"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2",
"quote",
"syn 2.0.28",
@@ -1475,7 +1475,7 @@ version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro-error",
"proc-macro2",
"quote",
@@ -1488,7 +1488,7 @@ version = "4.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2",
"quote",
"syn 2.0.28",
@@ -1530,6 +1530,7 @@ dependencies = [
"datanode",
"datatypes",
"derive-new",
+ "derive_builder 0.12.0",
"enum_dispatch",
"futures-util",
"moka 0.9.9",
@@ -1689,7 +1690,7 @@ dependencies = [
"paste",
"regex",
"snafu",
- "strum 0.21.0",
+ "strum 0.25.0",
"tokio",
"tokio-util",
"url",
@@ -1700,7 +1701,7 @@ name = "common-error"
version = "0.4.0-nightly"
dependencies = [
"snafu",
- "strum 0.24.1",
+ "strum 0.25.0",
]
[[package]]
@@ -4153,11 +4154,13 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=39b0ea8d086d0ab762046b0f473aa3ef8bd347f9#39b0ea8d086d0ab762046b0f473aa3ef8bd347f9"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ec2d346e09a2f6db3b1d0aaf010e89ed8a69eccc#ec2d346e09a2f6db3b1d0aaf010e89ed8a69eccc"
dependencies = [
"prost",
"serde",
"serde_json",
+ "strum 0.25.0",
+ "strum_macros 0.25.2",
"tonic 0.9.2",
"tonic-build",
]
@@ -4273,15 +4276,6 @@ dependencies = [
"http",
]
-[[package]]
-name = "heck"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
-dependencies = [
- "unicode-segmentation",
-]
-
[[package]]
name = "heck"
version = "0.4.1"
@@ -5345,6 +5339,7 @@ dependencies = [
"session",
"snafu",
"store-api",
+ "strum 0.25.0",
"table",
"tokio",
"tokio-stream",
@@ -5574,7 +5569,7 @@ dependencies = [
"snafu",
"storage",
"store-api",
- "strum 0.21.0",
+ "strum 0.25.0",
"table",
"tokio",
"tokio-util",
@@ -5654,7 +5649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f"
dependencies = [
"darling 0.20.3",
- "heck 0.4.1",
+ "heck",
"num-bigint",
"proc-macro-crate 1.3.1",
"proc-macro-error",
@@ -7098,7 +7093,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270"
dependencies = [
"bytes",
- "heck 0.4.1",
+ "heck",
"itertools 0.10.5",
"lazy_static",
"log",
@@ -8918,7 +8913,7 @@ dependencies = [
"snafu",
"snap",
"sql",
- "strum 0.24.1",
+ "strum 0.25.0",
"table",
"tikv-jemalloc-ctl",
"tokio",
@@ -9140,7 +9135,7 @@ version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2",
"quote",
"syn 1.0.109",
@@ -9403,7 +9398,7 @@ checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9"
dependencies = [
"dotenvy",
"either",
- "heck 0.4.1",
+ "heck",
"once_cell",
"proc-macro2",
"quote",
@@ -9608,23 +9603,11 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
-[[package]]
-name = "strum"
-version = "0.21.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2"
-dependencies = [
- "strum_macros 0.21.1",
-]
-
[[package]]
name = "strum"
version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f"
-dependencies = [
- "strum_macros 0.24.3",
-]
[[package]]
name = "strum"
@@ -9635,25 +9618,13 @@ dependencies = [
"strum_macros 0.25.2",
]
-[[package]]
-name = "strum_macros"
-version = "0.21.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec"
-dependencies = [
- "heck 0.3.3",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "strum_macros"
version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2",
"quote",
"rustversion",
@@ -9666,7 +9637,7 @@ version = "0.25.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2",
"quote",
"rustversion",
@@ -9716,7 +9687,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ae64fb7ad0670c7d6d53d57b1b91beb2212afc30e164cc8edb02d6b2cff32a"
dependencies = [
"gix",
- "heck 0.4.1",
+ "heck",
"prettyplease 0.2.12",
"prost",
"prost-build",
@@ -9738,7 +9709,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "658f6cbbd29a250869b87e1bb5a4b42db534cacfc1c03284f2536cd36b6c1617"
dependencies = [
"git2",
- "heck 0.4.1",
+ "heck",
"prettyplease 0.2.12",
"prost",
"prost-build",
@@ -10777,7 +10748,7 @@ version = "0.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95d27d749378ceab6ec22188ed7ad102205c89ddb92ab662371c850ffc71aa1a"
dependencies = [
- "heck 0.4.1",
+ "heck",
"log",
"proc-macro2",
"quote",
@@ -10795,7 +10766,7 @@ version = "0.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c8d9ecedde2fd77e975c38eeb9ca40b34ad0247b2259c6e6bbd2a8d6cc2444f"
dependencies = [
- "heck 0.4.1",
+ "heck",
"log",
"proc-macro2",
"quote",
diff --git a/Cargo.toml b/Cargo.toml
index d81105b40f46..bfbc1049d98e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -77,7 +77,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git
derive_builder = "0.12"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "39b0ea8d086d0ab762046b0f473aa3ef8bd347f9" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ec2d346e09a2f6db3b1d0aaf010e89ed8a69eccc" }
itertools = "0.10"
lazy_static = "1.4"
once_cell = "1.18"
@@ -93,6 +93,7 @@ snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4", features = [
"visitor",
] }
+strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 20c27c82aab0..1f91313b60de 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -37,7 +37,6 @@ use greptime_proto::v1;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
-use greptime_proto::v1::region::region_request;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{DdlRequest, IntervalMonthDayNano, QueryRequest, SemanticType};
use snafu::prelude::*;
@@ -333,21 +332,6 @@ fn query_request_type(request: &QueryRequest) -> &'static str {
}
}
-/// Returns the type name of the [RegionRequest].
-pub fn region_request_type(request: ®ion_request::Body) -> &'static str {
- match request {
- region_request::Body::Inserts(_) => "region.inserts",
- region_request::Body::Deletes(_) => "region.deletes",
- region_request::Body::Create(_) => "region.create",
- region_request::Body::Drop(_) => "region.drop",
- region_request::Body::Open(_) => "region.open",
- region_request::Body::Close(_) => "region.close",
- region_request::Body::Alter(_) => "region.alter",
- region_request::Body::Flush(_) => "region.flush",
- region_request::Body::Compact(_) => "region.compact",
- }
-}
-
/// Returns the type name of the [DdlRequest].
fn ddl_request_type(request: &DdlRequest) -> &'static str {
match request.expr {
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index a0b0f99e94e3..aa37d26a3d6a 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -22,6 +22,7 @@ common-telemetry = { workspace = true }
common-time = { workspace = true }
datafusion.workspace = true
datatypes = { workspace = true }
+derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
moka = { version = "0.9", features = ["future"] }
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index f5a686cc02b2..2af1d8ae8f5f 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
+use api::v1::region::region_client::RegionClient as PbRegionClient;
use api::v1::HealthCheckRequest;
use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
@@ -82,11 +83,6 @@ impl Client {
Default::default()
}
- pub fn with_manager(channel_manager: ChannelManager) -> Self {
- let inner = Arc::new(Inner::with_manager(channel_manager));
- Self { inner }
- }
-
pub fn with_urls<U, A>(urls: A) -> Self
where
U: AsRef<str>,
@@ -157,6 +153,11 @@ impl Client {
})
}
+ pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
+ let (_, channel) = self.find_channel()?;
+ Ok(PbRegionClient::new(channel))
+ }
+
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
let (_, channel) = self.find_channel()?;
Ok(PrometheusGatewayClient::new(channel))
diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs
index 45ae26440b8a..23a67ebae1bd 100644
--- a/src/client/src/lib.rs
+++ b/src/client/src/lib.rs
@@ -18,6 +18,7 @@ mod database;
pub mod error;
pub mod load_balance;
mod metrics;
+pub mod region;
mod stream_insert;
pub use api;
diff --git a/src/client/src/metrics.rs b/src/client/src/metrics.rs
index 49ba8bf341c7..06a2bb791f9d 100644
--- a/src/client/src/metrics.rs
+++ b/src/client/src/metrics.rs
@@ -25,3 +25,4 @@ pub const METRIC_GRPC_FLUSH_TABLE: &str = "grpc.flush_table";
pub const METRIC_GRPC_COMPACT_TABLE: &str = "grpc.compact_table";
pub const METRIC_GRPC_TRUNCATE_TABLE: &str = "grpc.truncate_table";
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
+pub(crate) const METRIC_REGION_REQUEST_GRPC: &str = "grpc.region_request";
diff --git a/src/client/src/region.rs b/src/client/src/region.rs
new file mode 100644
index 000000000000..b45937edeb68
--- /dev/null
+++ b/src/client/src/region.rs
@@ -0,0 +1,146 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::region::{region_request, RegionRequest, RegionRequestHeader, RegionResponse};
+use api::v1::ResponseHeader;
+use common_error::status_code::StatusCode;
+use common_telemetry::timer;
+use snafu::OptionExt;
+
+use crate::error::{IllegalDatabaseResponseSnafu, Result, ServerSnafu};
+use crate::{metrics, Client};
+
+type AffectedRows = u64;
+
+#[derive(Debug)]
+pub struct RegionRequester {
+ trace_id: Option<u64>,
+ span_id: Option<u64>,
+ client: Client,
+}
+
+impl RegionRequester {
+ pub fn new(client: Client) -> Self {
+ // TODO(LFC): Pass in trace_id and span_id from some context when we have it.
+ Self {
+ trace_id: None,
+ span_id: None,
+ client,
+ }
+ }
+
+ pub async fn handle(self, request: region_request::Body) -> Result<AffectedRows> {
+ let request_type = request.as_ref().to_string();
+
+ let request = RegionRequest {
+ header: Some(RegionRequestHeader {
+ trace_id: self.trace_id,
+ span_id: self.span_id,
+ }),
+ body: Some(request),
+ };
+
+ let _timer = timer!(
+ metrics::METRIC_REGION_REQUEST_GRPC,
+ &[("request_type", request_type)]
+ );
+
+ let mut client = self.client.raw_region_client()?;
+
+ let RegionResponse {
+ header,
+ affected_rows,
+ } = client.handle(request).await?.into_inner();
+
+ check_response_header(header)?;
+
+ Ok(affected_rows)
+ }
+}
+
+fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
+ let status = header
+ .and_then(|header| header.status)
+ .context(IllegalDatabaseResponseSnafu {
+ err_msg: "either response header or status is missing",
+ })?;
+
+ if StatusCode::is_success(status.status_code) {
+ Ok(())
+ } else {
+ let code =
+ StatusCode::from_u32(status.status_code).context(IllegalDatabaseResponseSnafu {
+ err_msg: format!("unknown server status: {:?}", status),
+ })?;
+ ServerSnafu {
+ code,
+ msg: status.err_msg,
+ }
+ .fail()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use api::v1::Status as PbStatus;
+
+ use super::*;
+ use crate::Error::{IllegalDatabaseResponse, Server};
+
+ #[test]
+ fn test_check_response_header() {
+ let result = check_response_header(None);
+ assert!(matches!(
+ result.unwrap_err(),
+ IllegalDatabaseResponse { .. }
+ ));
+
+ let result = check_response_header(Some(ResponseHeader { status: None }));
+ assert!(matches!(
+ result.unwrap_err(),
+ IllegalDatabaseResponse { .. }
+ ));
+
+ let result = check_response_header(Some(ResponseHeader {
+ status: Some(PbStatus {
+ status_code: StatusCode::Success as u32,
+ err_msg: "".to_string(),
+ }),
+ }));
+ assert!(result.is_ok());
+
+ let result = check_response_header(Some(ResponseHeader {
+ status: Some(PbStatus {
+ status_code: u32::MAX,
+ err_msg: "".to_string(),
+ }),
+ }));
+ assert!(matches!(
+ result.unwrap_err(),
+ IllegalDatabaseResponse { .. }
+ ));
+
+ let result = check_response_header(Some(ResponseHeader {
+ status: Some(PbStatus {
+ status_code: StatusCode::Internal as u32,
+ err_msg: "blabla".to_string(),
+ }),
+ }));
+ let Server { code, msg } = result.unwrap_err() else {
+ unreachable!()
+ };
+ assert_eq!(code, StatusCode::Internal);
+ assert_eq!(msg, "blabla");
+ }
+}
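For orientation, the new RegionRequester is driven by the create-table procedure later in this patch. A hedged usage sketch, mirroring the calls in on_datanode_create_regions below; the helper function itself is illustrative and not part of the patch.

// Illustrative sketch: send one region request to a datanode and get the affected rows.
use api::v1::region::region_request::Body;
use client::client_manager::DatanodeClients;
use client::region::RegionRequester;
use common_meta::peer::Peer;

async fn send_region_request(
    clients: &DatanodeClients,
    datanode: &Peer,
    request: Body,
) -> client::error::Result<u64> {
    // Obtain the gRPC client for this datanode from the pool.
    let client = clients.get_client(datanode).await;
    // `handle` wraps the body with a RegionRequestHeader and checks the response status.
    RegionRequester::new(client).handle(request).await
}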
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 3978e3aba819..59037d770252 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -35,6 +35,12 @@ pub const INFORMATION_SCHEMA_TABLES_TABLE_ID: u32 = 3;
pub const INFORMATION_SCHEMA_COLUMNS_TABLE_ID: u32 = 4;
pub const MITO_ENGINE: &str = "mito";
+pub const MITO2_ENGINE: &str = "mito2";
+
+pub fn default_engine() -> &'static str {
+ MITO_ENGINE
+}
+
pub const IMMUTABLE_FILE_ENGINE: &str = "file";
pub const SEMANTIC_TYPE_PRIMARY_KEY: &str = "TAG";
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index f67ca0a7cd06..48e52b437322 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -27,7 +27,7 @@ orc-rust = "0.2"
paste = "1.0"
regex = "1.7"
snafu.workspace = true
-strum = { version = "0.21", features = ["derive"] }
+strum.workspace = true
tokio-util.workspace = true
tokio.workspace = true
url = "2.3"
diff --git a/src/common/error/Cargo.toml b/src/common/error/Cargo.toml
index e7c6abfc1a5e..28ce054822e4 100644
--- a/src/common/error/Cargo.toml
+++ b/src/common/error/Cargo.toml
@@ -6,4 +6,4 @@ license.workspace = true
[dependencies]
snafu = { version = "0.7", features = ["backtraces"] }
-strum = { version = "0.24", features = ["std", "derive"] }
+strum.workspace = true
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 12ee2c01f912..c91832eee58b 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -38,7 +38,7 @@ use catalog::remote::CachedMetaKvBackend;
use catalog::CatalogManagerRef;
use client::client_manager::DatanodeClients;
use common_base::Plugins;
-use common_catalog::consts::MITO_ENGINE;
+use common_catalog::consts::default_engine;
use common_error::ext::BoxedError;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
@@ -213,7 +213,6 @@ impl Instance {
let create_expr_factory = CreateExprFactory;
let row_inserter = Arc::new(RowInserter::new(
- MITO_ENGINE.to_string(),
catalog_manager.clone(),
create_expr_factory,
dist_instance.clone(),
@@ -286,7 +285,6 @@ impl Instance {
let grpc_query_handler = StandaloneGrpcQueryHandler::arc(dn_instance.clone());
let row_inserter = Arc::new(RowInserter::new(
- MITO_ENGINE.to_string(),
catalog_manager.clone(),
create_expr_factory,
grpc_query_handler.clone(),
@@ -366,7 +364,7 @@ impl Instance {
catalog_name, schema_name, table_name,
);
let _ = self
- .create_table_by_columns(ctx, table_name, columns, MITO_ENGINE)
+ .create_table_by_columns(ctx, table_name, columns, default_engine())
.await?;
info!(
"Successfully created table on insertion: {}.{}.{}",
diff --git a/src/frontend/src/row_inserter.rs b/src/frontend/src/row_inserter.rs
index d83af0bd65d9..803ca52dc5c3 100644
--- a/src/frontend/src/row_inserter.rs
+++ b/src/frontend/src/row_inserter.rs
@@ -17,6 +17,7 @@ use api::v1::ddl_request::Expr;
use api::v1::greptime_request::Request;
use api::v1::{AlterExpr, ColumnSchema, DdlRequest, Row, RowInsertRequest, RowInsertRequests};
use catalog::CatalogManagerRef;
+use common_catalog::consts::default_engine;
use common_grpc_expr::util::{extract_new_columns, ColumnExpr};
use common_query::Output;
use common_telemetry::info;
@@ -30,7 +31,6 @@ use crate::error::{CatalogSnafu, EmptyDataSnafu, Error, FindNewColumnsOnInsertio
use crate::expr_factory::CreateExprFactory;
pub struct RowInserter {
- engine_name: String,
catalog_manager: CatalogManagerRef,
create_expr_factory: CreateExprFactory,
grpc_query_handler: GrpcQueryHandlerRef<Error>,
@@ -38,13 +38,11 @@ pub struct RowInserter {
impl RowInserter {
pub fn new(
- engine_name: String,
catalog_manager: CatalogManagerRef,
create_expr_factory: CreateExprFactory,
grpc_query_handler: GrpcQueryHandlerRef<Error>,
) -> Self {
Self {
- engine_name,
catalog_manager,
create_expr_factory,
grpc_query_handler,
@@ -105,7 +103,7 @@ impl RowInserter {
let (column_schemas, _) = extract_schema_and_rows(req)?;
let create_table_expr = self
.create_expr_factory
- .create_table_expr_by_column_schemas(table_name, column_schemas, &self.engine_name)?;
+ .create_table_expr_by_column_schemas(table_name, column_schemas, default_engine())?;
let req = Request::Ddl(DdlRequest {
expr: Some(Expr::CreateTable(create_table_expr)),
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 488c28fabef5..0942583b380e 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -44,6 +44,7 @@ serde_json = "1.0"
servers = { workspace = true }
snafu.workspace = true
store-api = { workspace = true }
+strum.workspace = true
table = { workspace = true }
tokio-stream = { version = "0.1", features = ["net"] }
tokio.workspace = true
@@ -56,6 +57,7 @@ uuid.workspace = true
[dev-dependencies]
chrono.workspace = true
+client = { workspace = true, features = ["testing"] }
common-procedure-test = { workspace = true }
session = { workspace = true }
tracing = "0.1"
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index eaafae1a40d5..f608bd90b194 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -516,6 +516,9 @@ pub enum Error {
operation: String,
location: Location,
},
+
+ #[snafu(display("Primary key '{key}' not found when creating region request, at {location}"))]
+ PrimaryKeyNotFound { key: String, location: Location },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -570,6 +573,7 @@ impl ErrorExt for Error {
| Error::UnsupportedSelectorType { .. }
| Error::InvalidArguments { .. }
| Error::InvalidHeartbeatRequest { .. }
+ | Error::PrimaryKeyNotFound { .. }
| Error::TooManyPartitions { .. } => StatusCode::InvalidArguments,
Error::LeaseKeyFromUtf8 { .. }
| Error::LeaseValueFromUtf8 { .. }
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index 0cb93e47b4a3..98e0d62d3c9f 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -19,7 +19,4 @@ pub(crate) const METRIC_META_ROUTE_REQUEST: &str = "meta.route_request";
pub(crate) const METRIC_META_HEARTBEAT_CONNECTION_NUM: &str = "meta.heartbeat_connection_num";
pub(crate) const METRIC_META_HANDLER_EXECUTE: &str = "meta.handler_execute";
-pub(crate) const METRIC_META_CREATE_TABLE_PROCEDURE_CREATE_META: &str =
- "meta.procedure.create_table.create_meta";
-pub(crate) const METRIC_META_CREATE_TABLE_PROCEDURE_CREATE_TABLE: &str =
- "meta.procedure.create_table.create_table";
+pub(crate) const METRIC_META_PROCEDURE_CREATE_TABLE: &str = "meta.procedure.create_table";
diff --git a/src/meta-srv/src/procedure/create_table.rs b/src/meta-srv/src/procedure/create_table.rs
index 01d1813728ea..1a4713d62815 100644
--- a/src/meta-srv/src/procedure/create_table.rs
+++ b/src/meta-srv/src/procedure/create_table.rs
@@ -12,8 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use api::v1::region::region_request::Body as PbRegionRequest;
+use api::v1::region::{ColumnDef, CreateRequest as PbCreateRegionRequest};
+use api::v1::SemanticType;
use async_trait::async_trait;
+use client::region::RegionRequester;
use client::Database;
+use common_catalog::consts::MITO2_ENGINE;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_meta::key::table_name::TableNameKey;
@@ -25,13 +30,16 @@ use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::info;
use futures::future::join_all;
use serde::{Deserialize, Serialize};
-use snafu::{ensure, ResultExt};
-use table::engine::TableReference;
+use snafu::{ensure, OptionExt, ResultExt};
+use store_api::storage::RegionId;
+use strum::AsRefStr;
+use table::engine::{region_dir, TableReference};
use table::metadata::{RawTableInfo, TableId};
use super::utils::{handle_request_datanode_error, handle_retry_error};
use crate::ddl::DdlContext;
-use crate::error::{self, Result, TableMetadataManagerSnafu};
+use crate::error::{self, PrimaryKeyNotFoundSnafu, Result, TableMetadataManagerSnafu};
+use crate::metrics;
pub struct CreateTableProcedure {
context: DdlContext,
@@ -69,6 +77,10 @@ impl CreateTableProcedure {
&self.creator.data.task.table_info
}
+ fn table_id(&self) -> TableId {
+ self.table_info().ident.table_id
+ }
+
pub fn region_routes(&self) -> &Vec<RegionRoute> {
&self.creator.data.region_routes
}
@@ -99,17 +111,126 @@ impl CreateTableProcedure {
return Ok(Status::Done);
}
- self.creator.data.state = CreateTableState::DatanodeCreateTable;
+ self.creator.data.state = if expr.engine == MITO2_ENGINE {
+ CreateTableState::DatanodeCreateRegions
+ } else {
+ CreateTableState::DatanodeCreateTable
+ };
Ok(Status::executing(true))
}
- async fn on_create_metadata(&self) -> Result<Status> {
- let _timer = common_telemetry::timer!(
- crate::metrics::METRIC_META_CREATE_TABLE_PROCEDURE_CREATE_META
- );
+ fn create_region_request_template(&self) -> Result<PbCreateRegionRequest> {
+ let create_table_expr = &self.creator.data.task.create_table;
+
+ let column_defs = create_table_expr
+ .column_defs
+ .iter()
+ .enumerate()
+ .map(|(i, c)| {
+ let semantic_type = if create_table_expr.time_index == c.name {
+ SemanticType::Timestamp
+ } else if create_table_expr.primary_keys.contains(&c.name) {
+ SemanticType::Tag
+ } else {
+ SemanticType::Field
+ };
+
+ ColumnDef {
+ name: c.name.clone(),
+ column_id: i as u32,
+ datatype: c.datatype,
+ is_nullable: c.is_nullable,
+ default_constraint: c.default_constraint.clone(),
+ semantic_type: semantic_type as i32,
+ }
+ })
+ .collect::<Vec<_>>();
+
+ let primary_key = create_table_expr
+ .primary_keys
+ .iter()
+ .map(|key| {
+ column_defs
+ .iter()
+ .find_map(|c| {
+ if &c.name == key {
+ Some(c.column_id)
+ } else {
+ None
+ }
+ })
+ .context(PrimaryKeyNotFoundSnafu { key })
+ })
+ .collect::<Result<_>>()?;
+
+ Ok(PbCreateRegionRequest {
+ region_id: 0,
+ engine: create_table_expr.engine.to_string(),
+ column_defs,
+ primary_key,
+ create_if_not_exists: true,
+ region_dir: "".to_string(),
+ options: create_table_expr.table_options.clone(),
+ })
+ }
+
+ async fn on_datanode_create_regions(&mut self) -> Result<Status> {
+ let create_table_data = &self.creator.data;
+ let region_routes = &create_table_data.region_routes;
+
+ let create_table_expr = &create_table_data.task.create_table;
+ let catalog = &create_table_expr.catalog_name;
+ let schema = &create_table_expr.schema_name;
+
+ let request_template = self.create_region_request_template()?;
+
+ let leaders = find_leaders(region_routes);
+ let mut create_table_tasks = Vec::with_capacity(leaders.len());
+
+ for datanode in leaders {
+ let clients = self.context.datanode_clients.clone();
+
+ let regions = find_leader_regions(region_routes, &datanode);
+ let requests = regions
+ .iter()
+ .map(|region_number| {
+ let region_id = RegionId::new(self.table_id(), *region_number);
+
+ let mut create_table_request = request_template.clone();
+ create_table_request.region_id = region_id.as_u64();
+ create_table_request.region_dir = region_dir(catalog, schema, region_id);
+
+ PbRegionRequest::Create(create_table_request)
+ })
+ .collect::<Vec<_>>();
+
+ create_table_tasks.push(common_runtime::spawn_bg(async move {
+ for request in requests {
+ let client = clients.get_client(&datanode).await;
+ let requester = RegionRequester::new(client);
+
+ if let Err(err) = requester.handle(request).await {
+ return Err(handle_request_datanode_error(datanode)(err));
+ }
+ }
+ Ok(())
+ }));
+ }
+
+ join_all(create_table_tasks)
+ .await
+ .into_iter()
+ .map(|e| e.context(error::JoinSnafu).flatten())
+ .collect::<Result<Vec<_>>>()?;
+
+ self.creator.data.state = CreateTableState::CreateMetadata;
+
+ Ok(Status::executing(true))
+ }
- let table_id = self.table_info().ident.table_id as TableId;
+ async fn on_create_metadata(&self) -> Result<Status> {
+ let table_id = self.table_id();
let manager = &self.context.table_metadata_manager;
let raw_table_info = self.table_info().clone();
@@ -124,15 +245,12 @@ impl CreateTableProcedure {
}
async fn on_datanode_create_table(&mut self) -> Result<Status> {
- let _timer = common_telemetry::timer!(
- crate::metrics::METRIC_META_CREATE_TABLE_PROCEDURE_CREATE_TABLE
- );
let region_routes = &self.creator.data.region_routes;
let table_name = self.table_name();
let clients = self.context.datanode_clients.clone();
let leaders = find_leaders(region_routes);
let mut joins = Vec::with_capacity(leaders.len());
- let table_id = self.table_info().ident.table_id;
+ let table_id = self.table_id();
for datanode in leaders {
let client = clients.get_client(&datanode).await;
@@ -172,9 +290,17 @@ impl Procedure for CreateTableProcedure {
}
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
- match self.creator.data.state {
+ let state = &self.creator.data.state;
+
+ let _timer = common_telemetry::timer!(
+ metrics::METRIC_META_PROCEDURE_CREATE_TABLE,
+ &[("step", state.as_ref().to_string())]
+ );
+
+ match state {
CreateTableState::Prepare => self.on_prepare().await,
CreateTableState::DatanodeCreateTable => self.on_datanode_create_table().await,
+ CreateTableState::DatanodeCreateRegions => self.on_datanode_create_regions().await,
CreateTableState::CreateMetadata => self.on_create_metadata().await,
}
.map_err(handle_retry_error)
@@ -213,12 +339,14 @@ impl TableCreator {
}
}
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr)]
enum CreateTableState {
/// Prepares to create the table
Prepare,
/// Datanode creates the table
DatanodeCreateTable,
+ /// Create regions on the Datanode
+ DatanodeCreateRegions,
/// Creates metadata
CreateMetadata,
}
@@ -236,3 +364,323 @@ impl CreateTableData {
self.task.table_ref()
}
}
+
+#[cfg(test)]
+mod test {
+ use std::collections::{HashMap, HashSet};
+ use std::sync::{Arc, Mutex};
+
+ use api::v1::region::region_server::RegionServer;
+ use api::v1::region::RegionResponse;
+ use api::v1::{
+ ColumnDataType, ColumnDef as PbColumnDef, CreateTableExpr, ResponseHeader,
+ Status as PbStatus,
+ };
+ use chrono::DateTime;
+ use client::client_manager::DatanodeClients;
+ use client::Client;
+ use common_grpc::channel_manager::ChannelManager;
+ use common_meta::key::TableMetadataManager;
+ use common_meta::peer::Peer;
+ use common_runtime::{Builder as RuntimeBuilder, Runtime};
+ use datatypes::prelude::ConcreteDataType;
+ use datatypes::schema::{ColumnSchema, RawSchema};
+ use servers::grpc::region_server::{RegionServerHandler, RegionServerRequestHandler};
+ use table::metadata::{RawTableMeta, TableIdent, TableType};
+ use table::requests::TableOptions;
+ use tokio::sync::mpsc;
+ use tonic::transport::Server;
+ use tower::service_fn;
+
+ use super::*;
+ use crate::handler::{HeartbeatMailbox, Pushers};
+ use crate::sequence::Sequence;
+ use crate::service::store::kv::KvBackendAdapter;
+ use crate::service::store::memory::MemStore;
+ use crate::test_util::new_region_route;
+
+ fn create_table_procedure() -> CreateTableProcedure {
+ let create_table_expr = CreateTableExpr {
+ catalog_name: "my_catalog".to_string(),
+ schema_name: "my_schema".to_string(),
+ table_name: "my_table".to_string(),
+ desc: "blabla".to_string(),
+ column_defs: vec![
+ PbColumnDef {
+ name: "ts".to_string(),
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ },
+ PbColumnDef {
+ name: "my_tag1".to_string(),
+ datatype: ColumnDataType::String as i32,
+ is_nullable: true,
+ default_constraint: vec![],
+ },
+ PbColumnDef {
+ name: "my_tag2".to_string(),
+ datatype: ColumnDataType::String as i32,
+ is_nullable: true,
+ default_constraint: vec![],
+ },
+ PbColumnDef {
+ name: "my_field_column".to_string(),
+ datatype: ColumnDataType::Int32 as i32,
+ is_nullable: true,
+ default_constraint: vec![],
+ },
+ ],
+ time_index: "ts".to_string(),
+ primary_keys: vec!["my_tag2".to_string(), "my_tag1".to_string()],
+ create_if_not_exists: false,
+ table_options: HashMap::new(),
+ table_id: None,
+ region_numbers: vec![1, 2, 3],
+ engine: MITO2_ENGINE.to_string(),
+ };
+
+ let raw_table_info = RawTableInfo {
+ ident: TableIdent::new(42),
+ name: "my_table".to_string(),
+ desc: Some("blabla".to_string()),
+ catalog_name: "my_catalog".to_string(),
+ schema_name: "my_schema".to_string(),
+ meta: RawTableMeta {
+ schema: RawSchema {
+ column_schemas: vec![
+ ColumnSchema::new(
+ "ts".to_string(),
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ ColumnSchema::new(
+ "my_tag1".to_string(),
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "my_tag2".to_string(),
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ "my_field_column".to_string(),
+ ConcreteDataType::int32_datatype(),
+ true,
+ ),
+ ],
+ timestamp_index: Some(0),
+ version: 0,
+ },
+ primary_key_indices: vec![1, 2],
+ value_indices: vec![2],
+ engine: MITO2_ENGINE.to_string(),
+ next_column_id: 3,
+ region_numbers: vec![1, 2, 3],
+ engine_options: HashMap::new(),
+ options: TableOptions::default(),
+ created_on: DateTime::default(),
+ partition_key_indices: vec![],
+ },
+ table_type: TableType::Base,
+ };
+
+ let peers = vec![
+ Peer::new(1, "127.0.0.1:4001"),
+ Peer::new(2, "127.0.0.1:4002"),
+ Peer::new(3, "127.0.0.1:4003"),
+ ];
+ let region_routes = vec![
+ new_region_route(1, &peers, 3),
+ new_region_route(2, &peers, 2),
+ new_region_route(3, &peers, 1),
+ ];
+
+ let kv_store = Arc::new(MemStore::new());
+
+ let mailbox_sequence = Sequence::new("test_heartbeat_mailbox", 0, 100, kv_store.clone());
+ let mailbox = HeartbeatMailbox::create(Pushers::default(), mailbox_sequence);
+
+ CreateTableProcedure::new(
+ 1,
+ CreateTableTask::new(create_table_expr, vec![], raw_table_info),
+ region_routes,
+ DdlContext {
+ datanode_clients: Arc::new(DatanodeClients::default()),
+ mailbox,
+ server_addr: "127.0.0.1:4321".to_string(),
+ table_metadata_manager: Arc::new(TableMetadataManager::new(
+ KvBackendAdapter::wrap(kv_store),
+ )),
+ },
+ )
+ }
+
+ #[test]
+ fn test_create_region_request_template() {
+ let procedure = create_table_procedure();
+
+ let template = procedure.create_region_request_template().unwrap();
+
+ let expected = PbCreateRegionRequest {
+ region_id: 0,
+ engine: MITO2_ENGINE.to_string(),
+ column_defs: vec![
+ ColumnDef {
+ name: "ts".to_string(),
+ column_id: 0,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ is_nullable: false,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Timestamp as i32,
+ },
+ ColumnDef {
+ name: "my_tag1".to_string(),
+ column_id: 1,
+ datatype: ColumnDataType::String as i32,
+ is_nullable: true,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Tag as i32,
+ },
+ ColumnDef {
+ name: "my_tag2".to_string(),
+ column_id: 2,
+ datatype: ColumnDataType::String as i32,
+ is_nullable: true,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Tag as i32,
+ },
+ ColumnDef {
+ name: "my_field_column".to_string(),
+ column_id: 3,
+ datatype: ColumnDataType::Int32 as i32,
+ is_nullable: true,
+ default_constraint: vec![],
+ semantic_type: SemanticType::Field as i32,
+ },
+ ],
+ primary_key: vec![2, 1],
+ create_if_not_exists: true,
+ region_dir: "".to_string(),
+ options: HashMap::new(),
+ };
+ assert_eq!(template, expected);
+ }
+
+ #[derive(Clone)]
+ struct TestingRegionServerHandler {
+ runtime: Arc<Runtime>,
+ create_region_notifier: mpsc::Sender<RegionId>,
+ }
+
+ impl TestingRegionServerHandler {
+ fn new(create_region_notifier: mpsc::Sender<RegionId>) -> Self {
+ Self {
+ runtime: Arc::new(RuntimeBuilder::default().worker_threads(2).build().unwrap()),
+ create_region_notifier,
+ }
+ }
+
+ fn new_client(&self, datanode: &Peer) -> Client {
+ let (client, server) = tokio::io::duplex(1024);
+
+ let handler =
+ RegionServerRequestHandler::new(Arc::new(self.clone()), self.runtime.clone());
+
+ tokio::spawn(async move {
+ Server::builder()
+ .add_service(RegionServer::new(handler))
+ .serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(
+ server,
+ )]))
+ .await
+ });
+
+ let channel_manager = ChannelManager::new();
+ let mut client = Some(client);
+ channel_manager
+ .reset_with_connector(
+ datanode.addr.clone(),
+ service_fn(move |_| {
+ let client = client.take().unwrap();
+ async move { Ok::<_, std::io::Error>(client) }
+ }),
+ )
+ .unwrap();
+ Client::with_manager_and_urls(channel_manager, vec![datanode.addr.clone()])
+ }
+ }
+
+ #[async_trait]
+ impl RegionServerHandler for TestingRegionServerHandler {
+ async fn handle(&self, request: PbRegionRequest) -> servers::error::Result<RegionResponse> {
+ let PbRegionRequest::Create(request) = request else {
+ unreachable!()
+ };
+ let region_id = request.region_id.into();
+
+ self.create_region_notifier.send(region_id).await.unwrap();
+
+ Ok(RegionResponse {
+ header: Some(ResponseHeader {
+ status: Some(PbStatus {
+ status_code: 0,
+ err_msg: "".to_string(),
+ }),
+ }),
+ affected_rows: 0,
+ })
+ }
+ }
+
+ #[tokio::test]
+ async fn test_on_datanode_create_regions() {
+ let mut procedure = create_table_procedure();
+
+ let (tx, mut rx) = mpsc::channel(10);
+
+ let region_server = TestingRegionServerHandler::new(tx);
+
+ let datanodes = find_leaders(&procedure.creator.data.region_routes);
+ for peer in datanodes {
+ let client = region_server.new_client(&peer);
+ procedure
+ .context
+ .datanode_clients
+ .insert_client(peer, client)
+ .await;
+ }
+
+ let expected_created_regions = Arc::new(Mutex::new(HashSet::from([
+ RegionId::new(42, 1),
+ RegionId::new(42, 2),
+ RegionId::new(42, 3),
+ ])));
+ let handle = tokio::spawn({
+ let expected_created_regions = expected_created_regions.clone();
+ let mut max_recv = expected_created_regions.lock().unwrap().len();
+ async move {
+ while let Some(region_id) = rx.recv().await {
+ expected_created_regions.lock().unwrap().remove(&region_id);
+
+ max_recv -= 1;
+ if max_recv == 0 {
+ break;
+ }
+ }
+ }
+ });
+
+ let status = procedure.on_datanode_create_regions().await.unwrap();
+ assert!(matches!(status, Status::Executing { persist: true }));
+ assert!(matches!(
+ procedure.creator.data.state,
+ CreateTableState::CreateMetadata
+ ));
+
+ handle.await.unwrap();
+
+ assert!(expected_created_regions.lock().unwrap().is_empty());
+ }
+}
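
The request template above classifies every column by comparing it against the expr's time index and primary key list: the time index becomes a Timestamp column, primary key members become Tag columns, and everything else is a Field column. A minimal std-only sketch of that classification rule (the enum and function names below are illustrative, not the greptime-proto types):

// Illustrative sketch only: mirrors the semantic-type assignment used by
// create_region_request_template(), with plain std types.
#[derive(Debug, PartialEq)]
enum SemanticKind {
    Timestamp,
    Tag,
    Field,
}

fn classify(column: &str, time_index: &str, primary_keys: &[String]) -> SemanticKind {
    if column == time_index {
        SemanticKind::Timestamp
    } else if primary_keys.iter().any(|k| k == column) {
        SemanticKind::Tag
    } else {
        SemanticKind::Field
    }
}

fn main() {
    let primary_keys = vec!["my_tag2".to_string(), "my_tag1".to_string()];
    assert_eq!(classify("ts", "ts", &primary_keys), SemanticKind::Timestamp);
    assert_eq!(classify("my_tag1", "ts", &primary_keys), SemanticKind::Tag);
    assert_eq!(classify("my_field_column", "ts", &primary_keys), SemanticKind::Field);
}
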
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index 28922b6a7b4d..280e3c77d36e 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -158,7 +158,7 @@ mod tests {
use super::super::tests::{TestingEnv, TestingEnvBuilder};
use super::{State, *};
- use crate::table_routes::tests::new_region_route;
+ use crate::test_util::new_region_route;
#[tokio::test]
async fn test_next_state() {
diff --git a/src/meta-srv/src/table_routes.rs b/src/meta-srv/src/table_routes.rs
index 93aff898cc74..403ebadaf4d4 100644
--- a/src/meta-srv/src/table_routes.rs
+++ b/src/meta-srv/src/table_routes.rs
@@ -72,7 +72,7 @@ pub(crate) async fn fetch_tables(
#[cfg(test)]
pub(crate) mod tests {
- use std::collections::{BTreeMap, HashMap};
+ use std::collections::HashMap;
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
@@ -141,25 +141,4 @@ pub(crate) mod tests {
.await
.unwrap();
}
-
- pub(crate) fn new_region_route(
- region_number: u64,
- peers: &[Peer],
- leader_node: u64,
- ) -> RegionRoute {
- let region = Region {
- id: region_number.into(),
- name: "".to_string(),
- partition: None,
- attrs: BTreeMap::new(),
- };
-
- let leader_peer = peers.iter().find(|peer| peer.id == leader_node).cloned();
-
- RegionRoute {
- region,
- leader_peer,
- follower_peers: vec![],
- }
- }
}
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index e040fc28bff3..d364b042bc32 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -15,6 +15,8 @@
use std::sync::Arc;
use common_meta::key::TableMetadataManager;
+use common_meta::peer::Peer;
+use common_meta::rpc::router::{Region, RegionRoute};
use common_procedure::local::{LocalManager, ManagerConfig};
use crate::cluster::MetaPeerClientBuilder;
@@ -28,6 +30,21 @@ use crate::sequence::Sequence;
use crate::service::store::kv::KvBackendAdapter;
use crate::service::store::memory::MemStore;
+pub(crate) fn new_region_route(region_id: u64, peers: &[Peer], leader_node: u64) -> RegionRoute {
+ let region = Region {
+ id: region_id.into(),
+ ..Default::default()
+ };
+
+ let leader_peer = peers.iter().find(|peer| peer.id == leader_node).cloned();
+
+ RegionRoute {
+ region,
+ leader_peer,
+ follower_peers: vec![],
+ }
+}
+
pub(crate) fn create_region_failover_manager() -> Arc<RegionFailoverManager> {
let kv_store = Arc::new(MemStore::new());
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 48aa8ade1734..0b9f1c885781 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -49,7 +49,7 @@ serde_json = "1.0"
snafu.workspace = true
storage = { workspace = true }
store-api = { workspace = true }
-strum = "0.21"
+strum.workspace = true
table = { workspace = true }
tokio-util.workspace = true
tokio.workspace = true
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 7ad63ea38aa2..d7fd06c4f9b7 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -83,7 +83,7 @@ sha1 = "0.10"
snafu.workspace = true
snap = "1"
sql = { workspace = true }
-strum = { version = "0.24", features = ["derive"] }
+strum.workspace = true
table = { workspace = true }
tokio-rustls = "0.24"
tokio-stream = { version = "0.1", features = ["net"] }
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index 1e71b15b71dd..ff0ff5173a3f 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -88,9 +88,8 @@ impl GrpcServer {
) -> Self {
let database_handler =
GreptimeRequestHandler::new(query_handler, user_provider.clone(), runtime.clone());
- let region_server_handler = region_server_handler.map(|handler| {
- RegionServerRequestHandler::new(handler, user_provider.clone(), runtime.clone())
- });
+ let region_server_handler =
+ region_server_handler.map(|handler| RegionServerRequestHandler::new(handler, runtime));
Self {
shutdown_tx: Mutex::new(None),
user_provider,
diff --git a/src/servers/src/grpc/greptime_handler.rs b/src/servers/src/grpc/greptime_handler.rs
index 873a6293fb05..d2c1463110e7 100644
--- a/src/servers/src/grpc/greptime_handler.rs
+++ b/src/servers/src/grpc/greptime_handler.rs
@@ -71,7 +71,7 @@ impl GreptimeRequestHandler {
query_ctx.set_current_user(user_info);
let handler = self.handler.clone();
- let request_type = request_type(&query);
+ let request_type = request_type(&query).to_string();
let db = query_ctx.get_db_string();
let timer = RequestTimer::new(db.clone(), request_type);
@@ -180,13 +180,13 @@ pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryConte
pub(crate) struct RequestTimer {
start: Instant,
db: String,
- request_type: &'static str,
+ request_type: String,
status_code: StatusCode,
}
impl RequestTimer {
/// Returns a new timer.
- pub fn new(db: String, request_type: &'static str) -> RequestTimer {
+ pub fn new(db: String, request_type: String) -> RequestTimer {
RequestTimer {
start: Instant::now(),
db,
@@ -208,7 +208,7 @@ impl Drop for RequestTimer {
self.start.elapsed(),
&[
(METRIC_DB_LABEL, std::mem::take(&mut self.db)),
- (METRIC_TYPE_LABEL, self.request_type.to_string()),
+ (METRIC_TYPE_LABEL, std::mem::take(&mut self.request_type)),
(METRIC_CODE_LABEL, self.status_code.to_string())
]
);
diff --git a/src/servers/src/grpc/region_server.rs b/src/servers/src/grpc/region_server.rs
index 1bcccc6c3083..a93686b026aa 100644
--- a/src/servers/src/grpc/region_server.rs
+++ b/src/servers/src/grpc/region_server.rs
@@ -14,30 +14,17 @@
use std::sync::Arc;
-use api::helper::region_request_type;
-use api::v1::auth_header::AuthScheme;
use api::v1::region::region_server::Region as RegionServer;
use api::v1::region::{region_request, RegionRequest, RegionResponse};
-use api::v1::{Basic, RequestHeader};
use async_trait::async_trait;
-use auth::{Identity, Password, UserInfoRef, UserProviderRef};
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_catalog::parse_catalog_and_schema_from_db_string;
use common_error::ext::ErrorExt;
use common_runtime::Runtime;
use common_telemetry::{debug, error};
-use metrics::increment_counter;
-use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{OptionExt, ResultExt};
use tonic::{Request, Response};
-use crate::error::{
- AuthSnafu, InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu, Result,
- UnsupportedAuthSchemeSnafu,
-};
-use crate::grpc::greptime_handler::RequestTimer;
+use crate::error::{InvalidQuerySnafu, JoinTaskSnafu, Result};
use crate::grpc::TonicResult;
-use crate::metrics::{METRIC_AUTH_FAILURE, METRIC_CODE_LABEL};
#[async_trait]
pub trait RegionServerHandler: Send + Sync {
@@ -49,21 +36,12 @@ pub type RegionServerHandlerRef = Arc<dyn RegionServerHandler>;
#[derive(Clone)]
pub struct RegionServerRequestHandler {
handler: Arc<dyn RegionServerHandler>,
- user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
}
impl RegionServerRequestHandler {
- pub fn new(
- handler: Arc<dyn RegionServerHandler>,
- user_provider: Option<UserProviderRef>,
- runtime: Arc<Runtime>,
- ) -> Self {
- Self {
- handler,
- user_provider,
- runtime,
- }
+ pub fn new(handler: Arc<dyn RegionServerHandler>, runtime: Arc<Runtime>) -> Self {
+ Self { handler, runtime }
}
async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
@@ -71,15 +49,7 @@ impl RegionServerRequestHandler {
reason: "Expecting non-empty GreptimeRequest.",
})?;
- let header = request.header.as_ref();
- let query_ctx = create_query_context(header);
- let user_info = self.auth(header, &query_ctx).await?;
- query_ctx.set_current_user(user_info);
-
let handler = self.handler.clone();
- let request_type = region_request_type(&query);
- let db = query_ctx.get_db_string();
- let timer = RequestTimer::new(db.clone(), request_type);
// Executes requests in another runtime to
// 1. prevent the execution from being cancelled unexpectedly by the Tonic runtime;
@@ -100,87 +70,10 @@ impl RegionServerRequestHandler {
})
});
- handle.await.context(JoinTaskSnafu).map_err(|e| {
- timer.record(e.status_code());
- e
- })?
- }
-
- async fn auth(
- &self,
- header: Option<&RequestHeader>,
- query_ctx: &QueryContextRef,
- ) -> Result<Option<UserInfoRef>> {
- let Some(user_provider) = self.user_provider.as_ref() else {
- return Ok(None);
- };
-
- let auth_scheme = header
- .and_then(|header| {
- header
- .authorization
- .as_ref()
- .and_then(|x| x.auth_scheme.clone())
- })
- .context(NotFoundAuthHeaderSnafu)?;
-
- match auth_scheme {
- AuthScheme::Basic(Basic { username, password }) => user_provider
- .auth(
- Identity::UserId(&username, None),
- Password::PlainText(password.into()),
- query_ctx.current_catalog(),
- query_ctx.current_schema(),
- )
- .await
- .context(AuthSnafu),
- AuthScheme::Token(_) => UnsupportedAuthSchemeSnafu {
- name: "Token AuthScheme".to_string(),
- }
- .fail(),
- }
- .map(Some)
- .map_err(|e| {
- increment_counter!(
- METRIC_AUTH_FAILURE,
- &[(METRIC_CODE_LABEL, format!("{}", e.status_code()))]
- );
- e
- })
+ handle.await.context(JoinTaskSnafu)?
}
}
-pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
- let (catalog, schema) = header
- .map(|header| {
- // We provide dbname field in newer versions of protos/sdks
- // parse dbname from header in priority
- if !header.dbname.is_empty() {
- parse_catalog_and_schema_from_db_string(&header.dbname)
- } else {
- (
- if !header.catalog.is_empty() {
- &header.catalog
- } else {
- DEFAULT_CATALOG_NAME
- },
- if !header.schema.is_empty() {
- &header.schema
- } else {
- DEFAULT_SCHEMA_NAME
- },
- )
- }
- })
- .unwrap_or((DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME));
-
- QueryContextBuilder::default()
- .current_catalog(catalog.to_string())
- .current_schema(schema.to_string())
- .try_trace_id(header.and_then(|h: &RequestHeader| h.trace_id))
- .build()
-}
-
#[async_trait]
impl RegionServer for RegionServerRequestHandler {
async fn handle(
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 528ebe0e0fdf..f9cc7ce953cb 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -14,6 +14,7 @@
use std::cmp::Ordering;
+use common_catalog::consts::default_engine;
use itertools::Itertools;
use once_cell::sync::Lazy;
use snafu::{ensure, OptionExt, ResultExt};
@@ -143,7 +144,7 @@ impl<'a> ParserContext<'a> {
let partitions = self.parse_partitions()?;
- let engine = self.parse_table_engine(common_catalog::consts::MITO_ENGINE)?;
+ let engine = self.parse_table_engine(default_engine())?;
let options = self
.parser
.parse_options(Keyword::WITH)
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
index f4937bfb60a8..7d3fb5809f1a 100644
--- a/src/table/src/engine.rs
+++ b/src/table/src/engine.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use common_base::paths::DATA_DIR;
use common_procedure::BoxedProcedure;
use datafusion_common::TableReference as DfTableReference;
-use store_api::storage::RegionNumber;
+use store_api::storage::{RegionId, RegionNumber};
use crate::error::{self, Result};
use crate::metadata::TableId;
@@ -198,6 +198,14 @@ pub fn table_dir(catalog_name: &str, schema_name: &str, table_id: TableId) -> St
format!("{DATA_DIR}{catalog_name}/{schema_name}/{table_id}/")
}
+pub fn region_dir(catalog_name: &str, schema_name: &str, region_id: RegionId) -> String {
+ format!(
+ "{}{}",
+ table_dir(catalog_name, schema_name, region_id.table_id()),
+ region_name(region_id.table_id(), region_id.region_number())
+ )
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -212,4 +220,13 @@ mod tests {
assert_eq!("greptime.public.test", table_ref.to_string());
}
+
+ #[test]
+ fn test_region_dir() {
+ let region_id = RegionId::new(42, 1);
+ assert_eq!(
+ region_dir("my_catalog", "my_schema", region_id),
+ "data/my_catalog/my_schema/42/42_0000000001"
+ );
+ }
}
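
The new region_dir helper nests the region name under the existing table directory, so each region gets its own path below data/{catalog}/{schema}/{table_id}/. A std-only sketch of the resulting layout, assuming the zero-padded {table_id}_{region_number} region name shown in test_region_dir:

// Illustrative sketch: reproduces the on-disk layout checked by test_region_dir.
fn region_dir_sketch(catalog: &str, schema: &str, table_id: u32, region_number: u32) -> String {
    // table_dir: data/{catalog}/{schema}/{table_id}/
    // region name: {table_id}_{region_number:010}
    format!("data/{catalog}/{schema}/{table_id}/{table_id}_{region_number:010}")
}

fn main() {
    assert_eq!(
        region_dir_sketch("my_catalog", "my_schema", 42, 1),
        "data/my_catalog/my_schema/42/42_0000000001"
    );
}
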
|
feat
|
create distributed Mito2 table (#2246)
|
13ed10556a78278dd71ed1880599c6cf6f8bc5d3
|
2024-12-30 14:06:08
|
zyy17
|
refactor: support to convert time string to timestamp in `convert_value()` (#5242)
| false
|
diff --git a/src/servers/src/mysql/helper.rs b/src/servers/src/mysql/helper.rs
index e18053a9b793..c933fcc8626c 100644
--- a/src/servers/src/mysql/helper.rs
+++ b/src/servers/src/mysql/helper.rs
@@ -17,7 +17,9 @@ use std::time::Duration;
use chrono::NaiveDate;
use common_query::prelude::ScalarValue;
+use common_time::Timestamp;
use datatypes::prelude::ConcreteDataType;
+use datatypes::types::TimestampType;
use datatypes::value::{self, Value};
use itertools::Itertools;
use opensrv_mysql::{to_naive_datetime, ParamValue, ValueInner};
@@ -161,7 +163,7 @@ pub fn convert_value(param: &ParamValue, t: &ConcreteDataType) -> Result<ScalarV
String::from_utf8_lossy(b).to_string(),
))),
ConcreteDataType::Binary(_) => Ok(ScalarValue::Binary(Some(b.to_vec()))),
-
+ ConcreteDataType::Timestamp(ts_type) => convert_bytes_to_timestamp(b, ts_type),
_ => error::PreparedStmtTypeMismatchSnafu {
expected: t,
actual: param.coltype,
@@ -235,8 +237,41 @@ pub fn convert_expr_to_scalar_value(param: &Expr, t: &ConcreteDataType) -> Resul
}
}
+fn convert_bytes_to_timestamp(bytes: &[u8], ts_type: &TimestampType) -> Result<ScalarValue> {
+ let ts = Timestamp::from_str_utc(&String::from_utf8_lossy(bytes))
+ .map_err(|e| {
+ error::MysqlValueConversionSnafu {
+ err_msg: e.to_string(),
+ }
+ .build()
+ })?
+ .convert_to(ts_type.unit())
+ .ok_or_else(|| {
+ error::MysqlValueConversionSnafu {
+ err_msg: "Overflow when converting timestamp to target unit".to_string(),
+ }
+ .build()
+ })?;
+ match ts_type {
+ TimestampType::Nanosecond(_) => {
+ Ok(ScalarValue::TimestampNanosecond(Some(ts.value()), None))
+ }
+ TimestampType::Microsecond(_) => {
+ Ok(ScalarValue::TimestampMicrosecond(Some(ts.value()), None))
+ }
+ TimestampType::Millisecond(_) => {
+ Ok(ScalarValue::TimestampMillisecond(Some(ts.value()), None))
+ }
+ TimestampType::Second(_) => Ok(ScalarValue::TimestampSecond(Some(ts.value()), None)),
+ }
+}
+
#[cfg(test)]
mod tests {
+ use datatypes::types::{
+ TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
+ TimestampSecondType,
+ };
use sql::dialect::MySqlDialect;
use sql::parser::{ParseOptions, ParserContext};
@@ -340,4 +375,87 @@ mod tests {
let v = convert_expr_to_scalar_value(&expr, &t).unwrap();
assert_eq!(ScalarValue::Time64Microsecond(None), v);
}
+
+ #[test]
+ fn test_convert_bytes_to_timestamp() {
+ let test_cases = vec![
+ // input unix timestamp in seconds -> nanosecond.
+ (
+ "2024-12-26 12:00:00",
+ TimestampType::Nanosecond(TimestampNanosecondType),
+ ScalarValue::TimestampNanosecond(Some(1735214400000000000), None),
+ ),
+ // input unix timestamp in seconds -> microsecond.
+ (
+ "2024-12-26 12:00:00",
+ TimestampType::Microsecond(TimestampMicrosecondType),
+ ScalarValue::TimestampMicrosecond(Some(1735214400000000), None),
+ ),
+ // input unix timestamp in seconds -> millisecond.
+ (
+ "2024-12-26 12:00:00",
+ TimestampType::Millisecond(TimestampMillisecondType),
+ ScalarValue::TimestampMillisecond(Some(1735214400000), None),
+ ),
+ // input unix timestamp in seconds -> second.
+ (
+ "2024-12-26 12:00:00",
+ TimestampType::Second(TimestampSecondType),
+ ScalarValue::TimestampSecond(Some(1735214400), None),
+ ),
+ // input unix timestamp in milliseconds -> nanosecond.
+ (
+ "2024-12-26 12:00:00.123",
+ TimestampType::Nanosecond(TimestampNanosecondType),
+ ScalarValue::TimestampNanosecond(Some(1735214400123000000), None),
+ ),
+ // input unix timestamp in milliseconds -> microsecond.
+ (
+ "2024-12-26 12:00:00.123",
+ TimestampType::Microsecond(TimestampMicrosecondType),
+ ScalarValue::TimestampMicrosecond(Some(1735214400123000), None),
+ ),
+ // input unix timestamp in milliseconds -> millisecond.
+ (
+ "2024-12-26 12:00:00.123",
+ TimestampType::Millisecond(TimestampMillisecondType),
+ ScalarValue::TimestampMillisecond(Some(1735214400123), None),
+ ),
+ // input unix timestamp in milliseconds -> second.
+ (
+ "2024-12-26 12:00:00.123",
+ TimestampType::Second(TimestampSecondType),
+ ScalarValue::TimestampSecond(Some(1735214400), None),
+ ),
+ // input unix timestamp in microseconds -> nanosecond.
+ (
+ "2024-12-26 12:00:00.123456",
+ TimestampType::Nanosecond(TimestampNanosecondType),
+ ScalarValue::TimestampNanosecond(Some(1735214400123456000), None),
+ ),
+ // input unix timestamp in microseconds -> microsecond.
+ (
+ "2024-12-26 12:00:00.123456",
+ TimestampType::Microsecond(TimestampMicrosecondType),
+ ScalarValue::TimestampMicrosecond(Some(1735214400123456), None),
+ ),
+ // input unix timestamp in microseconds -> millisecond.
+ (
+ "2024-12-26 12:00:00.123456",
+ TimestampType::Millisecond(TimestampMillisecondType),
+ ScalarValue::TimestampMillisecond(Some(1735214400123), None),
+ ),
+ // input unix timestamp in microseconds -> second.
+ (
+ "2024-12-26 12:00:00.123456",
+ TimestampType::Second(TimestampSecondType),
+ ScalarValue::TimestampSecond(Some(1735214400), None),
+ ),
+ ];
+
+ for (input, ts_type, expected) in test_cases {
+ let result = convert_bytes_to_timestamp(input.as_bytes(), &ts_type).unwrap();
+ assert_eq!(result, expected);
+ }
+ }
}
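
The helper above parses the string as a UTC timestamp and then rescales it to the column's unit, truncating any precision finer than the target (for example, ".123456" is dropped entirely when the target unit is seconds). A std-only sketch of that rescaling arithmetic, matching the expected values in the test cases:

// Illustrative sketch: rescale a parsed UTC instant (seconds + nanoseconds)
// to the target unit, truncating extra precision, as in the test cases above.
fn to_unit(secs: i64, nanos: u32, unit: &str) -> i64 {
    match unit {
        "second" => secs,
        "millisecond" => secs * 1_000 + (nanos / 1_000_000) as i64,
        "microsecond" => secs * 1_000_000 + (nanos / 1_000) as i64,
        "nanosecond" => secs * 1_000_000_000 + nanos as i64,
        _ => unreachable!("unknown unit"),
    }
}

fn main() {
    // "2024-12-26 12:00:00.123456" UTC == 1735214400 s + 123_456_000 ns
    let (secs, nanos) = (1_735_214_400_i64, 123_456_000_u32);
    assert_eq!(to_unit(secs, nanos, "second"), 1_735_214_400);
    assert_eq!(to_unit(secs, nanos, "millisecond"), 1_735_214_400_123);
    assert_eq!(to_unit(secs, nanos, "microsecond"), 1_735_214_400_123_456);
    assert_eq!(to_unit(secs, nanos, "nanosecond"), 1_735_214_400_123_456_000);
}
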
|
refactor
|
support to convert time string to timestamp in `convert_value()` (#5242)
|
cde5a36f5e7f97262daea85dcdee201d0c2323de
|
2024-01-18 12:29:48
|
Ruihang Xia
|
feat: precise filter for mito parquet reader (#3178)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 996469f1db29..435e661236c7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1916,6 +1916,7 @@ name = "common-recordbatch"
version = "0.6.0"
dependencies = [
"arc-swap",
+ "common-base",
"common-error",
"common-macro",
"datafusion",
diff --git a/src/common/recordbatch/Cargo.toml b/src/common/recordbatch/Cargo.toml
index ed56a5b5cd96..5425313bdb34 100644
--- a/src/common/recordbatch/Cargo.toml
+++ b/src/common/recordbatch/Cargo.toml
@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
arc-swap = "1.6"
+common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
datafusion-common.workspace = true
diff --git a/src/common/recordbatch/src/error.rs b/src/common/recordbatch/src/error.rs
index e5992c37d916..42a2754bb2c5 100644
--- a/src/common/recordbatch/src/error.rs
+++ b/src/common/recordbatch/src/error.rs
@@ -107,6 +107,16 @@ pub enum Error {
location: Location,
source: datatypes::error::Error,
},
+
+ #[snafu(display("Error occurs when performing arrow computation"))]
+ ArrowCompute {
+ #[snafu(source)]
+ error: datatypes::arrow::error::ArrowError,
+ location: Location,
+ },
+
+ #[snafu(display("Unsupported operation: {}", reason))]
+ UnsupportedOperation { reason: String, location: Location },
}
impl ErrorExt for Error {
@@ -120,10 +130,13 @@ impl ErrorExt for Error {
| Error::Format { .. }
| Error::InitRecordbatchStream { .. }
| Error::ColumnNotExists { .. }
- | Error::ProjectArrowRecordBatch { .. } => StatusCode::Internal,
+ | Error::ProjectArrowRecordBatch { .. }
+ | Error::ArrowCompute { .. } => StatusCode::Internal,
Error::External { source, .. } => source.status_code(),
+ Error::UnsupportedOperation { .. } => StatusCode::Unsupported,
+
Error::SchemaConversion { source, .. } | Error::CastVector { source, .. } => {
source.status_code()
}
diff --git a/src/common/recordbatch/src/filter.rs b/src/common/recordbatch/src/filter.rs
new file mode 100644
index 000000000000..3175ace37eaf
--- /dev/null
+++ b/src/common/recordbatch/src/filter.rs
@@ -0,0 +1,258 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Util record batch stream wrapper that can perform precise filter.
+
+use datafusion::logical_expr::{Expr, Operator};
+use datafusion_common::arrow::array::{ArrayRef, Datum, Scalar};
+use datafusion_common::arrow::buffer::BooleanBuffer;
+use datafusion_common::arrow::compute::kernels::cmp;
+use datafusion_common::ScalarValue;
+use datatypes::vectors::VectorRef;
+use snafu::ResultExt;
+
+use crate::error::{ArrowComputeSnafu, Result, UnsupportedOperationSnafu};
+
+/// An inplace expr evaluator for simple filter. Only support
+/// - `col` `op` `literal`
+/// - `literal` `op` `col`
+///
+/// And the `op` is one of `=`, `!=`, `>`, `>=`, `<`, `<=`.
+///
+/// This struct holds the normalized predicate expr in the form of
+/// `col` `op` `literal`, where `col` comes from the input.
+#[derive(Debug)]
+pub struct SimpleFilterEvaluator {
+ /// Name of the referenced column.
+ column_name: String,
+ /// The literal value.
+ literal: Scalar<ArrayRef>,
+ /// The operator.
+ op: Operator,
+}
+
+impl SimpleFilterEvaluator {
+ pub fn try_new(predicate: &Expr) -> Option<Self> {
+ match predicate {
+ Expr::BinaryExpr(binary) => {
+ // check if the expr is in the supported form
+ match binary.op {
+ Operator::Eq
+ | Operator::NotEq
+ | Operator::Lt
+ | Operator::LtEq
+ | Operator::Gt
+ | Operator::GtEq => {}
+ _ => return None,
+ }
+
+ // swap the expr if it is in the form of `literal` `op` `col`
+ let mut op = binary.op;
+ let (lhs, rhs) = match (&*binary.left, &*binary.right) {
+ (Expr::Column(ref col), Expr::Literal(ref lit)) => (col, lit),
+ (Expr::Literal(ref lit), Expr::Column(ref col)) => {
+ // safety: The previous check ensures the operator is able to swap.
+ op = op.swap().unwrap();
+ (col, lit)
+ }
+ _ => return None,
+ };
+
+ Some(Self {
+ column_name: lhs.name.clone(),
+ literal: rhs.clone().to_scalar(),
+ op,
+ })
+ }
+ _ => None,
+ }
+ }
+
+ /// Get the name of the referenced column.
+ pub fn column_name(&self) -> &str {
+ &self.column_name
+ }
+
+ pub fn evaluate_scalar(&self, input: &ScalarValue) -> Result<bool> {
+ let result = self.evaluate_datum(&input.to_scalar())?;
+ Ok(result.value(0))
+ }
+
+ pub fn evaluate_array(&self, input: &ArrayRef) -> Result<BooleanBuffer> {
+ self.evaluate_datum(input)
+ }
+
+ pub fn evaluate_vector(&self, input: &VectorRef) -> Result<BooleanBuffer> {
+ self.evaluate_datum(&input.to_arrow_array())
+ }
+
+ fn evaluate_datum(&self, input: &impl Datum) -> Result<BooleanBuffer> {
+ let result = match self.op {
+ Operator::Eq => cmp::eq(input, &self.literal),
+ Operator::NotEq => cmp::neq(input, &self.literal),
+ Operator::Lt => cmp::lt(input, &self.literal),
+ Operator::LtEq => cmp::lt_eq(input, &self.literal),
+ Operator::Gt => cmp::gt(input, &self.literal),
+ Operator::GtEq => cmp::gt_eq(input, &self.literal),
+ _ => {
+ return UnsupportedOperationSnafu {
+ reason: format!("{:?}", self.op),
+ }
+ .fail()
+ }
+ };
+ result
+ .context(ArrowComputeSnafu)
+ .map(|array| array.values().clone())
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use std::sync::Arc;
+
+ use datafusion::logical_expr::BinaryExpr;
+ use datafusion_common::Column;
+
+ use super::*;
+
+ #[test]
+ fn unsupported_filter_op() {
+ // `+` is not supported
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ op: Operator::Plus,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ });
+ assert!(SimpleFilterEvaluator::try_new(&expr).is_none());
+
+ // two literal is not supported
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ });
+ assert!(SimpleFilterEvaluator::try_new(&expr).is_none());
+
+ // two column is not supported
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "bar".to_string(),
+ })),
+ });
+ assert!(SimpleFilterEvaluator::try_new(&expr).is_none());
+
+ // compound expr is not supported
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ });
+ assert!(SimpleFilterEvaluator::try_new(&expr).is_none());
+ }
+
+ #[test]
+ fn supported_filter_op() {
+ // equal
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ });
+ let _ = SimpleFilterEvaluator::try_new(&expr).unwrap();
+
+ // swap operands
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ op: Operator::Lt,
+ right: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ });
+ let evaluator = SimpleFilterEvaluator::try_new(&expr).unwrap();
+ assert_eq!(evaluator.op, Operator::Gt);
+ assert_eq!(evaluator.column_name, "foo".to_string());
+ }
+
+ #[test]
+ fn run_on_array() {
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ });
+ let evaluator = SimpleFilterEvaluator::try_new(&expr).unwrap();
+
+ let input_1 = Arc::new(datatypes::arrow::array::Int64Array::from(vec![1, 2, 3])) as _;
+ let result = evaluator.evaluate_array(&input_1).unwrap();
+ assert_eq!(result, BooleanBuffer::from(vec![true, false, false]));
+
+ let input_2 = Arc::new(datatypes::arrow::array::Int64Array::from(vec![1, 1, 1])) as _;
+ let result = evaluator.evaluate_array(&input_2).unwrap();
+ assert_eq!(result, BooleanBuffer::from(vec![true, true, true]));
+
+ let input_3 = Arc::new(datatypes::arrow::array::Int64Array::new_null(0)) as _;
+ let result = evaluator.evaluate_array(&input_3).unwrap();
+ assert_eq!(result, BooleanBuffer::from(vec![]));
+ }
+
+ #[test]
+ fn run_on_scalar() {
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "foo".to_string(),
+ })),
+ op: Operator::Lt,
+ right: Box::new(Expr::Literal(ScalarValue::Int64(Some(1)))),
+ });
+ let evaluator = SimpleFilterEvaluator::try_new(&expr).unwrap();
+
+ let input_1 = ScalarValue::Int64(Some(1));
+ let result = evaluator.evaluate_scalar(&input_1).unwrap();
+ assert!(!result);
+
+ let input_2 = ScalarValue::Int64(Some(0));
+ let result = evaluator.evaluate_scalar(&input_2).unwrap();
+ assert!(result);
+
+ let input_3 = ScalarValue::Int64(None);
+ let result = evaluator.evaluate_scalar(&input_3).unwrap();
+ assert!(!result);
+ }
+}
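
The evaluator normalizes `literal op col` into `col op literal` (swapping the operator) and then runs a single arrow comparison kernel between the column and the literal. As a rough std-only model of the same evaluation semantics for an i64 column, ignoring nulls and arrow arrays:

// Illustrative sketch: simplified model of SimpleFilterEvaluator semantics
// for an i64 column, without arrow kernels or null handling.
#[derive(Clone, Copy)]
enum Op { Eq, NotEq, Lt, LtEq, Gt, GtEq }

fn evaluate(op: Op, column: &[i64], literal: i64) -> Vec<bool> {
    column
        .iter()
        .map(|v| match op {
            Op::Eq => *v == literal,
            Op::NotEq => *v != literal,
            Op::Lt => *v < literal,
            Op::LtEq => *v <= literal,
            Op::Gt => *v > literal,
            Op::GtEq => *v >= literal,
        })
        .collect()
}

fn main() {
    // `foo = 1` over [1, 2, 3], mirroring the run_on_array test above.
    assert_eq!(evaluate(Op::Eq, &[1, 2, 3], 1), vec![true, false, false]);
    // `1 < foo` is normalized to `foo > 1` before evaluation.
    assert_eq!(evaluate(Op::Gt, &[0, 1, 2], 1), vec![false, false, true]);
}
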
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index 889046c16624..6e87d3f6c21e 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -14,6 +14,7 @@
pub mod adapter;
pub mod error;
+pub mod filter;
mod recordbatch;
pub mod util;
diff --git a/src/mito2/src/engine/prune_test.rs b/src/mito2/src/engine/prune_test.rs
index 29dde1e8b40e..27a66d68c6d2 100644
--- a/src/mito2/src/engine/prune_test.rs
+++ b/src/mito2/src/engine/prune_test.rs
@@ -76,16 +76,11 @@ async fn test_read_parquet_stats() {
+-------+---------+---------------------+
| tag_0 | field_0 | ts |
+-------+---------+---------------------+
-| 0 | 0.0 | 1970-01-01T00:00:00 |
-| 1 | 1.0 | 1970-01-01T00:00:01 |
| 10 | 10.0 | 1970-01-01T00:00:10 |
| 11 | 11.0 | 1970-01-01T00:00:11 |
| 12 | 12.0 | 1970-01-01T00:00:12 |
| 13 | 13.0 | 1970-01-01T00:00:13 |
| 14 | 14.0 | 1970-01-01T00:00:14 |
-| 2 | 2.0 | 1970-01-01T00:00:02 |
-| 3 | 3.0 | 1970-01-01T00:00:03 |
-| 4 | 4.0 | 1970-01-01T00:00:04 |
| 5 | 5.0 | 1970-01-01T00:00:05 |
| 6 | 6.0 | 1970-01-01T00:00:06 |
| 7 | 7.0 | 1970-01-01T00:00:07 |
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 5e1a983b9a2f..b84c78386606 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -536,6 +536,12 @@ pub enum Error {
error: std::io::Error,
location: Location,
},
+
+ #[snafu(display("Failed to filter record batch"))]
+ FilterRecordBatch {
+ source: common_recordbatch::error::Error,
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -632,6 +638,7 @@ impl ErrorExt for Error {
CleanDir { .. } => StatusCode::Unexpected,
InvalidConfig { .. } => StatusCode::InvalidArguments,
StaleLogEntry { .. } => StatusCode::Unexpected,
+ FilterRecordBatch { source, .. } => source.status_code(),
Upload { .. } => StatusCode::StorageUnavailable,
}
}
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index 87244d4c3165..27e2c3961317 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -124,6 +124,9 @@ lazy_static! {
/// Counter of row groups read.
pub static ref READ_ROW_GROUPS_TOTAL: IntCounterVec =
register_int_counter_vec!("greptime_mito_read_row_groups_total", "mito read row groups total", &[TYPE_LABEL]).unwrap();
+ /// Counter of filtered rows by precise filter.
+ pub static ref PRECISE_FILTER_ROWS_TOTAL: IntCounterVec =
+ register_int_counter_vec!("greptime_mito_precise_filter_rows_total", "mito precise filter rows total", &[TYPE_LABEL]).unwrap();
// ------- End of query metrics.
// Cache related metrics.
diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs
index c3ce229780fb..a4b3d8dbe23d 100644
--- a/src/mito2/src/read.rs
+++ b/src/mito2/src/read.rs
@@ -33,7 +33,7 @@ use datatypes::arrow::compute::SortOptions;
use datatypes::arrow::row::{RowConverter, SortField};
use datatypes::prelude::{ConcreteDataType, DataType, ScalarVector};
use datatypes::types::TimestampType;
-use datatypes::value::ValueRef;
+use datatypes::value::{Value, ValueRef};
use datatypes::vectors::{
BooleanVector, Helper, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, UInt8Vector,
@@ -58,6 +58,8 @@ use crate::memtable::BoxedBatchIterator;
pub struct Batch {
/// Primary key encoded in a comparable form.
primary_key: Vec<u8>,
+ /// Possibly decoded `primary_key` values. Some code paths decode them in advance and cache them here.
+ pk_values: Option<Vec<Value>>,
/// Timestamps of rows, should be sorted and not null.
timestamps: VectorRef,
/// Sequences of rows
@@ -104,6 +106,22 @@ impl Batch {
&self.primary_key
}
+ /// Returns possibly decoded primary-key values.
+ pub fn pk_values(&self) -> Option<&[Value]> {
+ self.pk_values.as_deref()
+ }
+
+ /// Sets possibly decoded primary-key values.
+ pub fn set_pk_values(&mut self, pk_values: Vec<Value>) {
+ self.pk_values = Some(pk_values);
+ }
+
+ /// Removes possibly decoded primary-key values. For testing only.
+ #[cfg(any(test, feature = "test"))]
+ pub fn remove_pk_values(&mut self) {
+ self.pk_values = None;
+ }
+
/// Returns fields in the batch.
pub fn fields(&self) -> &[BatchColumn] {
&self.fields
@@ -195,6 +213,7 @@ impl Batch {
// Now we need to clone the primary key. We could try `Bytes` if
// this becomes a bottleneck.
primary_key: self.primary_key.clone(),
+ pk_values: self.pk_values.clone(),
timestamps: self.timestamps.slice(offset, length),
sequences: Arc::new(self.sequences.get_slice(offset, length)),
op_types: Arc::new(self.op_types.get_slice(offset, length)),
@@ -653,6 +672,7 @@ impl BatchBuilder {
Ok(Batch {
primary_key: self.primary_key,
+ pk_values: None,
timestamps,
sequences,
op_types,
diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs
index 5f4fd67edc3a..c849a2582a8c 100644
--- a/src/mito2/src/read/projection.rs
+++ b/src/mito2/src/read/projection.rs
@@ -177,10 +177,14 @@ impl ProjectionMapper {
// Skips decoding pk if we don't need to output it.
let pk_values = if self.has_tags {
- self.codec
- .decode(batch.primary_key())
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?
+ match batch.pk_values() {
+ Some(v) => v.to_vec(),
+ None => self
+ .codec
+ .decode(batch.primary_key())
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?,
+ }
} else {
Vec::new()
};
diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs
index 42c8eff87fbf..d0334690cc1c 100644
--- a/src/mito2/src/sst/parquet.rs
+++ b/src/mito2/src/sst/parquet.rs
@@ -77,6 +77,9 @@ mod tests {
use std::sync::Arc;
use common_time::Timestamp;
+ use datafusion_common::{Column, ScalarValue};
+ use datafusion_expr::{BinaryExpr, Expr, Operator};
+ use table::predicate::Predicate;
use super::*;
use crate::cache::{CacheManager, PageKey};
@@ -283,4 +286,140 @@ mod tests {
offset_index,
);
}
+
+ #[tokio::test]
+ async fn test_read_with_tag_filter() {
+ let mut env = TestEnv::new();
+ let object_store = env.init_object_store_manager();
+ let handle = sst_file_handle(0, 1000);
+ let file_path = handle.file_path(FILE_DIR);
+ let metadata = Arc::new(sst_region_metadata());
+ let source = new_source(&[
+ new_batch_by_range(&["a", "d"], 0, 60),
+ new_batch_by_range(&["b", "f"], 0, 40),
+ new_batch_by_range(&["b", "h"], 100, 200),
+ ]);
+ // Use a small row group size for test.
+ let write_opts = WriteOptions {
+ row_group_size: 50,
+ ..Default::default()
+ };
+ // Prepare data.
+ let mut writer = ParquetWriter::new(
+ file_path,
+ metadata.clone(),
+ object_store.clone(),
+ Indexer::default(),
+ );
+ writer
+ .write_all(source, &write_opts)
+ .await
+ .unwrap()
+ .unwrap();
+
+ // Predicate
+ let predicate = Some(Predicate::new(vec![Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "tag_0".to_string(),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Utf8(Some("a".to_string())))),
+ })
+ .into()]));
+
+ let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store)
+ .predicate(predicate);
+ let mut reader = builder.build().await.unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch_by_range(&["a", "d"], 0, 50),
+ new_batch_by_range(&["a", "d"], 50, 60),
+ ],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_read_empty_batch() {
+ let mut env = TestEnv::new();
+ let object_store = env.init_object_store_manager();
+ let handle = sst_file_handle(0, 1000);
+ let file_path = handle.file_path(FILE_DIR);
+ let metadata = Arc::new(sst_region_metadata());
+ let source = new_source(&[
+ new_batch_by_range(&["a", "z"], 0, 0),
+ new_batch_by_range(&["a", "z"], 100, 100),
+ new_batch_by_range(&["a", "z"], 200, 230),
+ ]);
+ // Use a small row group size for test.
+ let write_opts = WriteOptions {
+ row_group_size: 50,
+ ..Default::default()
+ };
+ // Prepare data.
+ let mut writer = ParquetWriter::new(
+ file_path,
+ metadata.clone(),
+ object_store.clone(),
+ Indexer::default(),
+ );
+ writer
+ .write_all(source, &write_opts)
+ .await
+ .unwrap()
+ .unwrap();
+
+ let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store);
+ let mut reader = builder.build().await.unwrap();
+ check_reader_result(&mut reader, &[new_batch_by_range(&["a", "z"], 200, 230)]).await;
+ }
+
+ #[tokio::test]
+ async fn test_read_with_field_filter() {
+ let mut env = TestEnv::new();
+ let object_store = env.init_object_store_manager();
+ let handle = sst_file_handle(0, 1000);
+ let file_path = handle.file_path(FILE_DIR);
+ let metadata = Arc::new(sst_region_metadata());
+ let source = new_source(&[
+ new_batch_by_range(&["a", "d"], 0, 60),
+ new_batch_by_range(&["b", "f"], 0, 40),
+ new_batch_by_range(&["b", "h"], 100, 200),
+ ]);
+ // Use a small row group size for test.
+ let write_opts = WriteOptions {
+ row_group_size: 50,
+ ..Default::default()
+ };
+ // Prepare data.
+ let mut writer = ParquetWriter::new(
+ file_path,
+ metadata.clone(),
+ object_store.clone(),
+ Indexer::default(),
+ );
+ writer
+ .write_all(source, &write_opts)
+ .await
+ .unwrap()
+ .unwrap();
+
+ // Predicate
+ let predicate = Some(Predicate::new(vec![Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: "field_0".to_string(),
+ })),
+ op: Operator::GtEq,
+ right: Box::new(Expr::Literal(ScalarValue::UInt64(Some(150)))),
+ })
+ .into()]));
+
+ let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store)
+ .predicate(predicate);
+ let mut reader = builder.build().await.unwrap();
+ check_reader_result(&mut reader, &[new_batch_by_range(&["b", "h"], 150, 200)]).await;
+ }
}
diff --git a/src/mito2/src/sst/parquet/format.rs b/src/mito2/src/sst/parquet/format.rs
index 46bb00390827..dd083047e07c 100644
--- a/src/mito2/src/sst/parquet/format.rs
+++ b/src/mito2/src/sst/parquet/format.rs
@@ -118,8 +118,14 @@ pub(crate) struct ReadFormat {
metadata: RegionMetadataRef,
/// SST file schema.
arrow_schema: SchemaRef,
- // Field column id to its index in `schema` (SST schema).
+ /// Field column id to its index in `schema` (SST schema).
+ /// In the SST schema, field columns are stored at the front.
field_id_to_index: HashMap<ColumnId, usize>,
+ /// Field column id to its index in the projected schema (
+ /// the schema of [Batch]).
+ ///
+ /// This field is set at the first call to [convert_record_batch](Self::convert_record_batch).
+ field_id_to_projected_index: Option<HashMap<ColumnId, usize>>,
}
impl ReadFormat {
@@ -136,6 +142,7 @@ impl ReadFormat {
metadata,
arrow_schema,
field_id_to_index,
+ field_id_to_projected_index: None,
}
}
@@ -180,7 +187,7 @@ impl ReadFormat {
///
/// Note that the `record_batch` may only contains a subset of columns if it is projected.
pub(crate) fn convert_record_batch(
- &self,
+ &mut self,
record_batch: &RecordBatch,
batches: &mut VecDeque<Batch>,
) -> Result<()> {
@@ -197,6 +204,10 @@ impl ReadFormat {
}
);
+ if self.field_id_to_projected_index.is_none() {
+ self.init_id_to_projected_index(record_batch);
+ }
+
let mut fixed_pos_columns = record_batch
.columns()
.iter()
@@ -259,6 +270,19 @@ impl ReadFormat {
Ok(())
}
+ fn init_id_to_projected_index(&mut self, record_batch: &RecordBatch) {
+ let mut name_to_projected_index = HashMap::new();
+ for (index, field) in record_batch.schema().fields().iter().enumerate() {
+ let Some(column) = self.metadata.column_by_name(field.name()) else {
+ continue;
+ };
+ if column.semantic_type == SemanticType::Field {
+ name_to_projected_index.insert(column.column_id, index);
+ }
+ }
+ self.field_id_to_projected_index = Some(name_to_projected_index);
+ }
+
/// Returns min values of specific column in row groups.
pub(crate) fn min_values(
&self,
@@ -478,15 +502,25 @@ impl ReadFormat {
Some(Arc::new(UInt64Array::from_iter(values)))
}
- /// Field index of the primary key.
+ /// Index in SST of the primary key.
fn primary_key_position(&self) -> usize {
self.arrow_schema.fields.len() - 3
}
- /// Field index of the time index.
+ /// Index in SST of the time index.
fn time_index_position(&self) -> usize {
self.arrow_schema.fields.len() - FIXED_POS_COLUMN_NUM
}
+
+ /// Index of a field column by its column id.
+ /// This function is only available after the first call to
+ /// [convert_record_batch](Self::convert_record_batch). Otherwise
+ /// it always returns `None`.
+ pub fn field_index_by_id(&self, column_id: ColumnId) -> Option<usize> {
+ self.field_id_to_projected_index
+ .as_ref()
+ .and_then(|m| m.get(&column_id).copied())
+ }
}
/// Gets the arrow schema to store in parquet.
@@ -771,7 +805,7 @@ mod tests {
fn test_convert_empty_record_batch() {
let metadata = build_test_region_metadata();
let arrow_schema = build_test_arrow_schema();
- let read_format = ReadFormat::new(metadata);
+ let mut read_format = ReadFormat::new(metadata);
assert_eq!(arrow_schema, *read_format.arrow_schema());
let record_batch = RecordBatch::new_empty(arrow_schema);
@@ -785,7 +819,7 @@ mod tests {
#[test]
fn test_convert_record_batch() {
let metadata = build_test_region_metadata();
- let read_format = ReadFormat::new(metadata);
+ let mut read_format = ReadFormat::new(metadata);
let columns: Vec<ArrayRef> = vec![
Arc::new(Int64Array::from(vec![1, 1, 10, 10])), // field1
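
The new field_id_to_projected_index map is built lazily from the first record batch: every field column's id is mapped to its position in the projected arrow schema, so field_index_by_id becomes a plain hash lookup afterwards. A std-only sketch of that mapping step, with a (name, column_id) list standing in for the region metadata (illustrative names only):

use std::collections::HashMap;

// Illustrative sketch: build field column_id -> projected index from the
// projected schema's field names, as init_id_to_projected_index() does.
fn build_projected_index(
    projected_field_names: &[&str],
    field_columns: &[(&str, u32)], // (column name, column id) for Field columns
) -> HashMap<u32, usize> {
    let mut map = HashMap::new();
    for (index, name) in projected_field_names.iter().enumerate() {
        if let Some((_, id)) = field_columns.iter().find(|(n, _)| n == name) {
            map.insert(*id, index);
        }
    }
    map
}

fn main() {
    // The projection kept only field_1 and field_0, in that order.
    let map = build_projected_index(&["field_1", "field_0"], &[("field_0", 4), ("field_1", 3)]);
    assert_eq!(map.get(&3), Some(&0));
    assert_eq!(map.get(&4), Some(&1));
}
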
diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs
index e07773d5af41..6ba5d63af9db 100644
--- a/src/mito2/src/sst/parquet/reader.rs
+++ b/src/mito2/src/sst/parquet/reader.rs
@@ -15,12 +15,17 @@
//! Parquet reader.
use std::collections::{BTreeSet, VecDeque};
+use std::ops::BitAnd;
use std::sync::Arc;
use std::time::{Duration, Instant};
+use api::v1::SemanticType;
use async_trait::async_trait;
+use common_recordbatch::filter::SimpleFilterEvaluator;
use common_telemetry::{debug, warn};
use common_time::range::TimestampRange;
+use datafusion_common::arrow::array::BooleanArray;
+use datafusion_common::arrow::buffer::BooleanBuffer;
use datatypes::arrow::record_batch::RecordBatch;
use object_store::ObjectStore;
use parquet::arrow::arrow_reader::ParquetRecordBatchReader;
@@ -36,11 +41,14 @@ use tokio::io::BufReader;
use crate::cache::CacheManagerRef;
use crate::error::{
- ArrowReaderSnafu, InvalidMetadataSnafu, InvalidParquetSnafu, OpenDalSnafu, ReadParquetSnafu,
- Result,
+ ArrowReaderSnafu, FieldTypeMismatchSnafu, FilterRecordBatchSnafu, InvalidMetadataSnafu,
+ InvalidParquetSnafu, OpenDalSnafu, ReadParquetSnafu, Result,
+};
+use crate::metrics::{
+ PRECISE_FILTER_ROWS_TOTAL, READ_ROWS_TOTAL, READ_ROW_GROUPS_TOTAL, READ_STAGE_ELAPSED,
};
-use crate::metrics::{READ_ROWS_TOTAL, READ_ROW_GROUPS_TOTAL, READ_STAGE_ELAPSED};
use crate::read::{Batch, BatchReader};
+use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
use crate::sst::file::FileHandle;
use crate::sst::index::applier::SstIndexApplierRef;
use crate::sst::parquet::format::ReadFormat;
@@ -180,12 +188,31 @@ impl ParquetReaderBuilder {
..Default::default()
};
+ let predicate = if let Some(p) = &self.predicate {
+ p.exprs()
+ .iter()
+ .filter_map(|expr| SimpleFilterEvaluator::try_new(expr.df_expr()))
+ .collect()
+ } else {
+ vec![]
+ };
+
+ let codec = McmpRowCodec::new(
+ read_format
+ .metadata()
+ .primary_key_columns()
+ .map(|c| SortField::new(c.column_schema.data_type.clone()))
+ .collect(),
+ );
+
Ok(ParquetReader {
row_groups,
read_format,
reader_builder,
+ predicate,
current_reader: None,
batches: VecDeque::new(),
+ codec,
metrics,
})
}
@@ -316,6 +343,7 @@ struct Metrics {
num_row_groups_inverted_index_selected: usize,
/// Number of row groups to read after filtering by min-max index.
num_row_groups_min_max_selected: usize,
+ num_rows_precise_filtered: usize,
/// Duration to build the parquet reader.
build_cost: Duration,
/// Duration to scan the reader.
@@ -400,10 +428,14 @@ pub struct ParquetReader {
/// The builder contains the file handle, so don't drop the builder while using
/// the [ParquetReader].
reader_builder: RowGroupReaderBuilder,
+ /// Predicate pushed down to this reader.
+ predicate: Vec<SimpleFilterEvaluator>,
/// Reader of current row group.
current_reader: Option<ParquetRecordBatchReader>,
/// Buffered batches to return.
batches: VecDeque<Batch>,
+ /// Decoder for primary keys
+ codec: McmpRowCodec,
/// Local metrics.
metrics: Metrics,
}
@@ -419,16 +451,18 @@ impl BatchReader for ParquetReader {
}
// We need to fetch next record batch and convert it to batches.
- let Some(record_batch) = self.fetch_next_record_batch().await? else {
- self.metrics.scan_cost += start.elapsed();
- return Ok(None);
- };
- self.metrics.num_record_batches += 1;
-
- self.read_format
- .convert_record_batch(&record_batch, &mut self.batches)?;
- self.metrics.num_batches += self.batches.len();
+ while self.batches.is_empty() {
+ let Some(record_batch) = self.fetch_next_record_batch().await? else {
+ self.metrics.scan_cost += start.elapsed();
+ return Ok(None);
+ };
+ self.metrics.num_record_batches += 1;
+ self.read_format
+ .convert_record_batch(&record_batch, &mut self.batches)?;
+ self.prune_batches()?;
+ self.metrics.num_batches += self.batches.len();
+ }
let batch = self.batches.pop_front();
self.metrics.scan_cost += start.elapsed();
self.metrics.num_rows += batch.as_ref().map(|b| b.num_rows()).unwrap_or(0);
@@ -467,6 +501,9 @@ impl Drop for ParquetReader {
READ_ROW_GROUPS_TOTAL
.with_label_values(&["min_max_index_selected"])
.inc_by(self.metrics.num_row_groups_min_max_selected as u64);
+ PRECISE_FILTER_ROWS_TOTAL
+ .with_label_values(&["parquet"])
+ .inc_by(self.metrics.num_rows_precise_filtered as u64);
}
}
@@ -515,6 +552,104 @@ impl ParquetReader {
Ok(None)
}
+ /// Prunes batches by the pushed down predicate.
+ fn prune_batches(&mut self) -> Result<()> {
+ // fast path
+ if self.predicate.is_empty() {
+ return Ok(());
+ }
+
+ let mut new_batches = VecDeque::new();
+ let batches = std::mem::take(&mut self.batches);
+ for batch in batches {
+ let num_rows_before_filter = batch.num_rows();
+ let Some(batch_filtered) = self.precise_filter(batch)? else {
+ // the entire batch is filtered out
+ self.metrics.num_rows_precise_filtered += num_rows_before_filter;
+ continue;
+ };
+
+ // update metric
+ let filtered_rows = num_rows_before_filter - batch_filtered.num_rows();
+ self.metrics.num_rows_precise_filtered += filtered_rows;
+
+ if !batch_filtered.is_empty() {
+ new_batches.push_back(batch_filtered);
+ }
+ }
+ self.batches = new_batches;
+
+ Ok(())
+ }
+
+ /// Tries its best to apply the pushed-down predicates precisely to the input batch.
+ /// Returns the filtered batch, or `None` if the entire batch is filtered out.
+ ///
+ /// The supported filter expr types are defined in [SimpleFilterEvaluator].
+ ///
+ /// When a filter references a primary key column, this method decodes
+ /// the primary key and puts it into the batch.
+ fn precise_filter(&self, mut input: Batch) -> Result<Option<Batch>> {
+ let mut mask = BooleanBuffer::new_set(input.num_rows());
+
+ // Run the filters one by one and combine their results
+ // TODO(ruihang): run primary key filter first. It may short circuit other filters
+ for filter in &self.predicate {
+ let column_name = filter.column_name();
+ let Some(column_metadata) = self.read_format.metadata().column_by_name(column_name)
+ else {
+ // column not found, skip
+ // in situations like a column being added later
+ continue;
+ };
+ let result = match column_metadata.semantic_type {
+ SemanticType::Tag => {
+ let pk_values = self.codec.decode(input.primary_key())?;
+ // Safety: this is a primary key
+ let pk_index = self
+ .read_format
+ .metadata()
+ .primary_key_index(column_metadata.column_id)
+ .unwrap();
+ let pk_value = pk_values[pk_index]
+ .try_to_scalar_value(&column_metadata.column_schema.data_type)
+ .context(FieldTypeMismatchSnafu)?;
+ if filter
+ .evaluate_scalar(&pk_value)
+ .context(FilterRecordBatchSnafu)?
+ {
+ input.set_pk_values(pk_values);
+ continue;
+ } else {
+ // PK not match means the entire batch is filtered out.
+ return Ok(None);
+ }
+ }
+ SemanticType::Field => {
+ let Some(field_index) = self
+ .read_format
+ .field_index_by_id(column_metadata.column_id)
+ else {
+ continue;
+ };
+ let field_col = &input.fields()[field_index].data;
+ filter
+ .evaluate_vector(field_col)
+ .context(FilterRecordBatchSnafu)?
+ }
+ SemanticType::Timestamp => filter
+ .evaluate_vector(input.timestamps())
+ .context(FilterRecordBatchSnafu)?,
+ };
+
+ mask = mask.bitand(&result);
+ }
+
+ input.filter(&BooleanArray::from(mask).into())?;
+
+ Ok(Some(input))
+ }
+
#[cfg(test)]
pub fn parquet_metadata(&self) -> Arc<ParquetMetaData> {
self.reader_builder.parquet_meta.clone()
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 63d73c776e27..6164861b99bd 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -538,7 +538,8 @@ pub fn new_batch(
/// Ensure the reader returns batch as `expect`.
pub async fn check_reader_result<R: BatchReader>(reader: &mut R, expect: &[Batch]) {
let mut result = Vec::new();
- while let Some(batch) = reader.next_batch().await.unwrap() {
+ while let Some(mut batch) = reader.next_batch().await.unwrap() {
+ batch.remove_pk_values();
result.push(batch);
}
diff --git a/src/mito2/src/test_util/sst_util.rs b/src/mito2/src/test_util/sst_util.rs
index 7f4e7741757b..30a9db1f903c 100644
--- a/src/mito2/src/test_util/sst_util.rs
+++ b/src/mito2/src/test_util/sst_util.rs
@@ -114,7 +114,7 @@ pub fn sst_file_handle(start_ms: i64, end_ms: i64) -> FileHandle {
}
pub fn new_batch_by_range(tags: &[&str], start: usize, end: usize) -> Batch {
- assert!(end > start);
+ assert!(end >= start);
let pk = new_primary_key(tags);
let timestamps: Vec<_> = (start..end).map(|v| v as i64).collect();
let sequences = vec![1000; end - start];
diff --git a/src/store-api/src/metadata.rs b/src/store-api/src/metadata.rs
index 86ae565318a9..0ca91095148d 100644
--- a/src/store-api/src/metadata.rs
+++ b/src/store-api/src/metadata.rs
@@ -254,7 +254,10 @@ impl RegionMetadata {
.map(|id| self.column_by_id(*id).unwrap())
}
- /// Returns all field columns.
+ /// Returns all field columns before projection.
+ ///
+ /// **Use with caution**. On the read path, where a projection may be applied, this method
+ /// can return columns that are not present in the data batch.
pub fn field_columns(&self) -> impl Iterator<Item = &ColumnMetadata> {
self.column_metadatas
.iter()
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index a8377d382ed0..514541f2dd5c 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -48,6 +48,11 @@ impl Predicate {
Self { exprs }
}
+ /// Returns the logical exprs.
+ pub fn exprs(&self) -> &[Expr] {
+ &self.exprs
+ }
+
/// Builds physical exprs according to provided schema.
pub fn to_physical_exprs(
&self,
|
feat
|
precise filter for mito parquet reader (#3178)
|
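The `precise_filter` hunk above combines each pushed-down predicate's per-row result into a single boolean mask and then filters the batch with it. Below is a minimal, self-contained sketch of that mask-combining idea using plain arrow-rs compute kernels rather than GreptimeDB's internal `Batch`/`SimpleFilterEvaluator` types; the column values and the hard-coded predicate results are illustrative assumptions.

```rust
// Sketch only: an all-true mask is progressively narrowed by AND-ing in each
// predicate's result, then applied with the `filter` kernel.
use arrow::array::{Array, BooleanArray, Int64Array};
use arrow::compute::{and, filter};
use arrow::error::ArrowError;

fn main() -> Result<(), ArrowError> {
    let field = Int64Array::from(vec![1, 5, 10, 20]);

    // Pretend these are the per-row results of two pushed-down filters,
    // e.g. `v > 2` and `v < 15` (hard-coded here for brevity).
    let pred_a = BooleanArray::from(vec![false, true, true, true]);
    let pred_b = BooleanArray::from(vec![true, true, true, false]);

    // Start with every row selected, then AND in each predicate's result.
    let mut mask = BooleanArray::from(vec![true; field.len()]);
    for pred in [&pred_a, &pred_b] {
        mask = and(&mask, pred)?;
    }

    // Keep only the rows every filter accepted, analogous to `input.filter(...)`.
    let kept = filter(&field, &mask)?;
    assert_eq!(kept.len(), 2); // the rows holding 5 and 10 survive
    Ok(())
}
```

A row survives only when every filter accepts it, which is why a failed primary-key comparison in the diff can discard the whole batch at once: the key is constant across the batch, so its mask would be all false.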
863ee073a9a1ec8a9da7b1efaaeabd28e9b5221e
|
2024-08-22 17:33:20
|
Ning Sun
|
chore: add commerial support section (#4601)
| false
|
diff --git a/README.md b/README.md
index 3d4e5428c27a..1eb0db19ff59 100644
--- a/README.md
+++ b/README.md
@@ -150,7 +150,7 @@ Our official Grafana dashboard is available at [grafana](grafana/README.md) dire
## Project Status
-The current version has not yet reached the standards for General Availability.
+The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
@@ -172,6 +172,13 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
+## Commerial Support
+
+If you are running GreptimeDB OSS in your organization, we offer additional
+enterprise addons, installation service, training and consulting. [Contact
+us](https://greptime.com/contactus) and we will reach out to you with more
+detail of our commerial license.
+
## License
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
|
chore
|
add commerial support section (#4601)
|
1c9bf2e2a720003af78c25cc5928cc4779771555
|
2023-12-26 18:45:44
|
Ruihang Xia
|
fix: change CI target repo to the origin one (#3011)
| false
|
diff --git a/.github/workflows/doc-label.yml b/.github/workflows/doc-label.yml
index 298c7e3cecce..930134674abd 100644
--- a/.github/workflows/doc-label.yml
+++ b/.github/workflows/doc-label.yml
@@ -1,6 +1,6 @@
name: "PR Doc Labeler"
on:
- pull_request:
+ pull_request_target:
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
permissions:
diff --git a/.github/workflows/size-label.yml b/.github/workflows/size-label.yml
index 2b504f32f6a2..5e45dec5ed2f 100644
--- a/.github/workflows/size-label.yml
+++ b/.github/workflows/size-label.yml
@@ -1,6 +1,6 @@
name: size-labeler
-on: [pull_request]
+on: [pull_request_target]
jobs:
labeler:
|
fix
|
change CI target repo to the origin one (#3011)
|
3633f25d0c4332873cad277f9090ae45052b4148
|
2024-11-19 20:50:33
|
discord9
|
feat: also shutdown gracefully on sigterm on unix (#5023)
| false
|
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 731f527daf7c..3a719b9589a7 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -43,6 +43,31 @@ lazy_static::lazy_static! {
prometheus::register_int_gauge_vec!("greptime_app_version", "app version", &["version", "short_version", "app"]).unwrap();
}
+/// Waits for the close signal; on unix platforms it's SIGINT or SIGTERM
+#[cfg(unix)]
+async fn start_wait_for_close_signal() -> std::io::Result<()> {
+ use tokio::signal::unix::{signal, SignalKind};
+ let mut sigint = signal(SignalKind::interrupt())?;
+ let mut sigterm = signal(SignalKind::terminate())?;
+
+ tokio::select! {
+ _ = sigint.recv() => {
+ info!("Received SIGINT, shutting down");
+ }
+ _ = sigterm.recv() => {
+ info!("Received SIGTERM, shutting down");
+ }
+ }
+
+ Ok(())
+}
+
+/// Waits for the close signal; on non-unix platforms it's ctrl-c
+#[cfg(not(unix))]
+async fn start_wait_for_close_signal() -> std::io::Result<()> {
+ tokio::signal::ctrl_c().await
+}
+
#[async_trait]
pub trait App: Send {
fn name(&self) -> &str;
@@ -69,9 +94,9 @@ pub trait App: Send {
self.start().await?;
if self.wait_signal() {
- if let Err(e) = tokio::signal::ctrl_c().await {
- error!(e; "Failed to listen for ctrl-c signal");
- // It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
+ if let Err(e) = start_wait_for_close_signal().await {
+ error!(e; "Failed to listen for close signal");
+ // It's unusual to fail to listen for close signal, maybe there's something unexpected in
// the underlying system. So we stop the app instead of running nonetheless to let people
// investigate the issue.
}
|
feat
|
also shutdown gracefully on sigterm on unix (#5023)
|
e767f372417e99f44d9fe08eef55e65c55c505f3
|
2024-03-11 14:58:40
|
Weny Xu
|
fix: fix f64 has no sufficient precision during parsing (#3483)
| false
|
diff --git a/Cargo.toml b/Cargo.toml
index 83fd64c6778e..c1604c1ff997 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -134,7 +134,7 @@ reqwest = { version = "0.11", default-features = false, features = [
rskafka = "0.5"
rust_decimal = "1.33"
serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
+serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index bfd4a11103e4..64635eeae2d0 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -2417,4 +2417,12 @@ mod tests {
);
check_value_ref_size_eq(&ValueRef::Decimal128(Decimal128::new(1234, 3, 1)), 32)
}
+
+ #[test]
+ fn test_incorrect_default_value_issue_3479() {
+ let value = OrderedF64::from(0.047318541668048164);
+ let serialized = serde_json::to_string(&value).unwrap();
+ let deserialized: OrderedF64 = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(value, deserialized);
+ }
}
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 00a797d44b23..606ee3ebef08 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -1495,4 +1495,25 @@ ENGINE=mito";
ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
let _ = result.unwrap();
}
+
+ #[test]
+ fn test_incorrect_default_value_issue_3479() {
+ let sql = r#"CREATE TABLE `ExcePTuRi`(
+non TIMESTAMP(6) TIME INDEX,
+`iUSTO` DOUBLE DEFAULT 0.047318541668048164
+)"#;
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, result.len());
+ match &result[0] {
+ Statement::CreateTable(c) => {
+ assert_eq!(
+ "`iUSTO` DOUBLE DEFAULT 0.047318541668048164",
+ c.columns[1].to_string()
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
}
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index a8f38e545d59..d51378c0982f 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -904,6 +904,29 @@ mod tests {
);
}
+ #[test]
+ fn test_incorrect_default_value_issue_3479() {
+ let opts = vec![ColumnOptionDef {
+ name: None,
+ option: ColumnOption::Default(Expr::Value(SqlValue::Number(
+ "0.047318541668048164".into(),
+ false,
+ ))),
+ }];
+ let constraint = parse_column_default_constraint(
+ "coll",
+ &ConcreteDataType::float64_datatype(),
+ &opts,
+ None,
+ )
+ .unwrap()
+ .unwrap();
+ assert_eq!("0.047318541668048164", constraint.to_string());
+ let encoded: Vec<u8> = constraint.clone().try_into().unwrap();
+ let decoded = ColumnDefaultConstraint::try_from(encoded.as_ref()).unwrap();
+ assert_eq!(decoded, constraint);
+ }
+
#[test]
pub fn test_sql_column_def_to_grpc_column_def() {
// test basic
diff --git a/tests/cases/standalone/common/create/create.result b/tests/cases/standalone/common/create/create.result
index 664c94fedebc..ed0529b9345e 100644
--- a/tests/cases/standalone/common/create/create.result
+++ b/tests/cases/standalone/common/create/create.result
@@ -155,6 +155,23 @@ CREATE TABLE test_like_2 LIKE test_like_1;
Error: 4000(TableAlreadyExists), Table already exists: `greptime.public.test_like_2`
+CREATE TABLE `ExcePTuRi`(
+non TIMESTAMP(6) TIME INDEX,
+`iUSTO` DOUBLE DEFAULT 0.047318541668048164
+)
+ENGINE=mito;
+
+Affected Rows: 0
+
+DESC table `ExcePTuRi`;
+
++--------+----------------------+-----+------+----------------------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+----------------------+---------------+
+| non | TimestampMicrosecond | PRI | NO | | TIMESTAMP |
+| iUSTO | Float64 | | YES | 0.047318541668048164 | FIELD |
++--------+----------------------+-----+------+----------------------+---------------+
+
DESC TABLE test_like_1;
+--------+----------------------+-----+------+---------+---------------+
@@ -183,3 +200,7 @@ DROP TABLE test_like_2;
Affected Rows: 0
+DROP table `ExcePTuRi`;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/create/create.sql b/tests/cases/standalone/common/create/create.sql
index f27b2bc6e812..9f541da3126e 100644
--- a/tests/cases/standalone/common/create/create.sql
+++ b/tests/cases/standalone/common/create/create.sql
@@ -64,6 +64,14 @@ CREATE TABLE test_like_2 LIKE test_like_1;
CREATE TABLE test_like_2 LIKE test_like_1;
+CREATE TABLE `ExcePTuRi`(
+non TIMESTAMP(6) TIME INDEX,
+`iUSTO` DOUBLE DEFAULT 0.047318541668048164
+)
+ENGINE=mito;
+
+DESC table `ExcePTuRi`;
+
DESC TABLE test_like_1;
DESC TABLE test_like_2;
@@ -71,3 +79,5 @@ DESC TABLE test_like_2;
DROP TABLE test_like_1;
DROP TABLE test_like_2;
+
+DROP table `ExcePTuRi`;
|
fix
|
fix f64 has no sufficient precision during parsing (#3483)
|
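The change above turns on serde_json's `float_roundtrip` feature so that an `f64` default value survives a serialize/parse round trip exactly, which is what the `test_incorrect_default_value_issue_3479` tests assert. A standalone sketch of the same check, assuming a crate whose `Cargo.toml` declares `serde_json = { version = "1.0", features = ["float_roundtrip"] }`:

```rust
// Sketch only: round-trip an f64 through its JSON string form.
fn main() {
    let value: f64 = 0.047318541668048164;
    let serialized = serde_json::to_string(&value).unwrap();
    let deserialized: f64 = serde_json::from_str(&serialized).unwrap();

    // Holds with the `float_roundtrip` feature enabled; without it, serde_json's
    // faster float parser may land on a slightly different bit pattern.
    assert_eq!(value, deserialized);
    println!("{serialized} parses back to {deserialized}");
}
```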
825e4beeadf83b28dfdd70cf44880c2e05afa6a5
|
2023-10-12 15:38:05
|
Ruihang Xia
|
build(ci): pin linux runner to ubuntu-20.04 (#2586)
| false
|
diff --git a/.github/workflows/apidoc.yml b/.github/workflows/apidoc.yml
index 05653d99112e..55d0031e374d 100644
--- a/.github/workflows/apidoc.yml
+++ b/.github/workflows/apidoc.yml
@@ -17,7 +17,7 @@ env:
jobs:
apidoc:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
diff --git a/.github/workflows/dev-build.yml b/.github/workflows/dev-build.yml
index 35e9bec323aa..2c8a759a8bcb 100644
--- a/.github/workflows/dev-build.yml
+++ b/.github/workflows/dev-build.yml
@@ -16,11 +16,11 @@ on:
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- - ubuntu-latest
- - ubuntu-latest-8-cores
- - ubuntu-latest-16-cores
- - ubuntu-latest-32-cores
- - ubuntu-latest-64-cores
+ - ubuntu-20.04
+ - ubuntu-20.04-8-cores
+ - ubuntu-20.04-16-cores
+ - ubuntu-20.04-32-cores
+ - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -78,7 +78,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -209,7 +209,7 @@ jobs:
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }}
steps:
@@ -241,7 +241,7 @@ jobs:
allocate-runners,
release-images-to-dockerhub,
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
continue-on-error: true
steps:
- uses: actions/checkout@v3
@@ -268,7 +268,7 @@ jobs:
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -293,7 +293,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -320,7 +320,7 @@ jobs:
needs: [
release-images-to-dockerhub
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index f2d9b27bcb0a..7644505de98a 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -34,7 +34,7 @@ env:
jobs:
typos:
name: Spell Check with Typos
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: crate-ci/[email protected]
@@ -42,7 +42,7 @@ jobs:
check:
name: Check
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -60,7 +60,7 @@ jobs:
toml:
name: Toml Check
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -80,7 +80,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ ubuntu-latest-8-cores ]
+ os: [ ubuntu-20.04-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -105,7 +105,7 @@ jobs:
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -124,7 +124,7 @@ jobs:
clippy:
name: Clippy
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -142,7 +142,7 @@ jobs:
coverage:
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest-8-cores
+ runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
diff --git a/.github/workflows/doc-issue.yml b/.github/workflows/doc-issue.yml
index 67a654d14740..da8fc8a40ce1 100644
--- a/.github/workflows/doc-issue.yml
+++ b/.github/workflows/doc-issue.yml
@@ -11,7 +11,7 @@ on:
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
@@ -25,7 +25,7 @@ jobs:
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 24df04f26b48..5cd39adbe64e 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -30,7 +30,7 @@ name: CI
jobs:
typos:
name: Spell Check with Typos
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: crate-ci/[email protected]
@@ -38,33 +38,33 @@ jobs:
check:
name: Check
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
clippy:
name: Clippy
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
coverage:
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
sqlness:
name: Sqlness Test
if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
diff --git a/.github/workflows/license.yaml b/.github/workflows/license.yaml
index a336476644fe..00264c110195 100644
--- a/.github/workflows/license.yaml
+++ b/.github/workflows/license.yaml
@@ -8,7 +8,7 @@ on:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v2
diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml
index 9a436e19660e..6e0fc8ef574a 100644
--- a/.github/workflows/nightly-build.yml
+++ b/.github/workflows/nightly-build.yml
@@ -14,11 +14,11 @@ on:
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.2xlarge-amd64
options:
- - ubuntu-latest
- - ubuntu-latest-8-cores
- - ubuntu-latest-16-cores
- - ubuntu-latest-32-cores
- - ubuntu-latest-64-cores
+ - ubuntu-20.04
+ - ubuntu-20.04-8-cores
+ - ubuntu-20.04-16-cores
+ - ubuntu-20.04-32-cores
+ - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -175,7 +175,7 @@ jobs:
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps:
@@ -205,7 +205,7 @@ jobs:
allocate-runners,
release-images-to-dockerhub,
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -234,7 +234,7 @@ jobs:
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -259,7 +259,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
@@ -286,7 +286,7 @@ jobs:
needs: [
release-images-to-dockerhub
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
diff --git a/.github/workflows/pr-title-checker.yml b/.github/workflows/pr-title-checker.yml
index 9279a83ad1d4..36506c91e998 100644
--- a/.github/workflows/pr-title-checker.yml
+++ b/.github/workflows/pr-title-checker.yml
@@ -10,7 +10,7 @@ on:
jobs:
check:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/[email protected]
@@ -19,7 +19,7 @@ jobs:
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/[email protected]
diff --git a/.github/workflows/release-dev-builder-images.yaml b/.github/workflows/release-dev-builder-images.yaml
index 39eaec216c41..b44b60dae16e 100644
--- a/.github/workflows/release-dev-builder-images.yaml
+++ b/.github/workflows/release-dev-builder-images.yaml
@@ -27,7 +27,7 @@ jobs:
release-dev-builder-images:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
- runs-on: ubuntu-latest-16-cores
+ runs-on: ubuntu-20.04-16-cores
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -46,7 +46,7 @@ jobs:
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 6d5534620958..7d6522f188ea 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -18,11 +18,11 @@ on:
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- - ubuntu-latest
- - ubuntu-latest-8-cores
- - ubuntu-latest-16-cores
- - ubuntu-latest-32-cores
- - ubuntu-latest-64-cores
+ - ubuntu-20.04
+ - ubuntu-20.04-8-cores
+ - ubuntu-20.04-16-cores
+ - ubuntu-20.04-32-cores
+ - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
@@ -97,7 +97,7 @@ jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -306,7 +306,7 @@ jobs:
allocate-runners,
release-images-to-dockerhub,
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -339,7 +339,7 @@ jobs:
build-macos-artifacts,
release-images-to-dockerhub,
]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
with:
@@ -357,7 +357,7 @@ jobs:
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -382,7 +382,7 @@ jobs:
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
|
build
|
pin linux runner to ubuntu-20.04 (#2586)
|
56691ff03b484ea6874ad41c03aedda334742e5a
|
2023-09-12 18:27:15
|
JeremyHi
|
refactor: mailbox timeout (#2330)
| false
|
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index e3ad12ba3cdc..513bc8e1c6cc 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -15,7 +15,7 @@
use std::collections::{BTreeMap, HashSet};
use std::ops::Range;
use std::sync::Arc;
-use std::time::Duration;
+use std::time::{Duration, Instant};
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{
@@ -262,7 +262,7 @@ pub struct HeartbeatMailbox {
pushers: Pushers,
sequence: Sequence,
senders: DashMap<MessageId, oneshot::Sender<Result<MailboxMessage>>>,
- timeouts: DashMap<MessageId, Duration>,
+ timeouts: DashMap<MessageId, Instant>,
timeout_notify: Notify,
}
@@ -309,7 +309,7 @@ impl HeartbeatMailbox {
self.timeout_notify.notified().await;
}
- let now = Duration::from_millis(common_time::util::current_time_millis() as u64);
+ let now = Instant::now();
let timeout_ids = self
.timeouts
.iter()
@@ -364,8 +364,7 @@ impl Mailbox for HeartbeatMailbox {
let (tx, rx) = oneshot::channel();
let _ = self.senders.insert(message_id, tx);
- let deadline =
- Duration::from_millis(common_time::util::current_time_millis() as u64) + timeout;
+ let deadline = Instant::now() + timeout;
let _ = self.timeouts.insert(message_id, deadline);
self.timeout_notify.notify_one();
|
refactor
|
mailbox timeout (#2330)
|
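The refactor above replaces wall-clock arithmetic (`current_time_millis()` plus a `Duration`) with monotonic `Instant` deadlines for mailbox message timeouts. A small, self-contained sketch of that bookkeeping pattern; the `Timeouts` type, the `MessageId` alias, and the method names are hypothetical stand-ins, not meta-srv APIs.

```rust
// Sketch only: store an Instant deadline per message and sweep with Instant::now().
use std::collections::HashMap;
use std::time::{Duration, Instant};

type MessageId = u64;

struct Timeouts {
    deadlines: HashMap<MessageId, Instant>,
}

impl Timeouts {
    /// Registers a deadline `timeout` from now, on the monotonic clock.
    fn register(&mut self, id: MessageId, timeout: Duration) {
        self.deadlines.insert(id, Instant::now() + timeout);
    }

    /// Returns the ids whose deadlines have passed.
    fn expired(&self) -> Vec<MessageId> {
        let now = Instant::now();
        self.deadlines
            .iter()
            .filter(|(_, deadline)| **deadline <= now)
            .map(|(id, _)| *id)
            .collect()
    }
}

fn main() {
    let mut timeouts = Timeouts { deadlines: HashMap::new() };
    timeouts.register(1, Duration::from_millis(0));
    timeouts.register(2, Duration::from_secs(60));
    std::thread::sleep(Duration::from_millis(5));
    assert_eq!(timeouts.expired(), vec![1]);
}
```

Because `Instant` is monotonic, the sweep is unaffected by system clock adjustments, which is a common reason to prefer it over wall-clock milliseconds for timeout tracking.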
63e1892dc1c631f30c5f1652ee3bb0e6cb4f9c75
|
2024-08-13 16:52:48
|
zyy17
|
refactor(plugin): add SetupPlugin and StartPlugin error (#4554)
| false
|
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index cd1265569ef8..d070c82fb175 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -395,6 +395,20 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to setup plugin"))]
+ SetupPlugin {
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
+
+ #[snafu(display("Failed to start plugin"))]
+ StartPlugin {
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -442,9 +456,12 @@ impl ErrorExt for Error {
AsyncTaskExecute { source, .. } => source.status_code(),
- CreateDir { .. } | RemoveDir { .. } | ShutdownInstance { .. } | DataFusion { .. } => {
- StatusCode::Internal
- }
+ CreateDir { .. }
+ | RemoveDir { .. }
+ | ShutdownInstance { .. }
+ | DataFusion { .. }
+ | SetupPlugin { .. }
+ | StartPlugin { .. } => StatusCode::Internal,
RegionNotFound { .. } => StatusCode::RegionNotFound,
RegionNotReady { .. } => StatusCode::RegionNotReady,
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index c925b2ea1a4f..cb0df405c597 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -364,6 +364,20 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to setup plugin"))]
+ SetupPlugin {
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
+
+ #[snafu(display("Failed to start plugin"))]
+ StartPlugin {
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -416,7 +430,10 @@ impl ErrorExt for Error {
Error::FindDatanode { .. } => StatusCode::RegionNotReady,
- Error::VectorToGrpcColumn { .. } | Error::CacheRequired { .. } => StatusCode::Internal,
+ Error::VectorToGrpcColumn { .. }
+ | Error::CacheRequired { .. }
+ | Error::SetupPlugin { .. }
+ | Error::StartPlugin { .. } => StatusCode::Internal,
Error::InvalidRegionRequest { .. } => StatusCode::IllegalState,
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 6e9efc48570b..8c5312fe9195 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -851,6 +851,20 @@ pub enum Error {
#[snafu(source(from(common_config::error::Error, Box::new)))]
source: Box<common_config::error::Error>,
},
+
+ #[snafu(display("Failed to setup plugin"))]
+ SetupPlugin {
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
+
+ #[snafu(display("Failed to start plugin"))]
+ StartPlugin {
+ #[snafu(implicit)]
+ location: Location,
+ source: BoxedError,
+ },
}
impl Error {
@@ -902,7 +916,9 @@ impl ErrorExt for Error {
| Error::Join { .. }
| Error::WeightArray { .. }
| Error::NotSetWeightArray { .. }
- | Error::PeerUnavailable { .. } => StatusCode::Internal,
+ | Error::PeerUnavailable { .. }
+ | Error::SetupPlugin { .. }
+ | Error::StartPlugin { .. } => StatusCode::Internal,
Error::Unsupported { .. } => StatusCode::Unsupported,
|
refactor
|
add SetupPlugin and StartPlugin error (#4554)
|
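The new `SetupPlugin` and `StartPlugin` variants above follow the usual snafu pattern of wrapping an underlying failure as `source` and mapping it to `StatusCode::Internal`. A self-contained sketch of how such a variant is typically produced at a call site with snafu 0.7's `ResultExt::context`; the enum, the config path, and `setup_plugin` below are illustrative stand-ins, not GreptimeDB types.

```rust
// Sketch only: a snafu error variant with a `source`, filled in via `.context(...)`.
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to setup plugin"))]
    SetupPlugin { source: std::io::Error },
}

fn setup_plugin() -> Result<(), Error> {
    // Any fallible call whose error becomes the variant's `source`.
    std::fs::read_to_string("/etc/my-plugin.toml")
        .map(|_| ())
        .context(SetupPluginSnafu)
}

fn main() {
    if let Err(e) = setup_plugin() {
        println!("{e}");
    }
}
```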