Dataset columns:
hash: string, fixed length 40
date: string (date), range 2022-04-19 15:26:27 to 2025-03-21 10:49:23
author: string, 86 distinct values
commit_message: string, length 12 to 115
is_merge: bool, 1 distinct value
git_diff: string, length 214 to 553k
type: string, 15 distinct values
masked_commit_message: string, length 8 to 110
hash: 225ae953d1d741522c339775c53d08501c67d08d
date: 2024-01-09 12:30:42
author: Wei
commit_message: feat: add parquet metadata to cache (#3097)
is_merge: false
git_diff:
diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs index a3ebf5c994bf..d057323083ae 100644 --- a/src/mito2/src/access_layer.rs +++ b/src/mito2/src/access_layer.rs @@ -89,10 +89,11 @@ impl AccessLayer { write_opts: &WriteOptions, ) -> Result<Option<SstInfo>> { let path = location::sst_file_path(&self.region_dir, request.file_id); + let region_id = request.metadata.region_id; - if let Some(write_cache) = request.cache_manager.write_cache() { + let sst_info = if let Some(write_cache) = request.cache_manager.write_cache() { // Write to the write cache. - return write_cache + write_cache .write_and_upload_sst( SstUploadRequest { file_id: request.file_id, @@ -104,12 +105,25 @@ impl AccessLayer { }, write_opts, ) - .await; + .await? + } else { + // Write cache is disabled. + let mut writer = ParquetWriter::new(path, request.metadata, self.object_store.clone()); + writer.write_all(request.source, write_opts).await? + }; + + // Put parquet metadata to cache manager. + if let Some(sst_info) = &sst_info { + if let Some(parquet_metadata) = &sst_info.file_metadata { + request.cache_manager.put_parquet_meta_data( + region_id, + request.file_id, + parquet_metadata.clone(), + ) + } } - // Write cache is disabled. - let mut writer = ParquetWriter::new(path, request.metadata, self.object_store.clone()); - writer.write_all(request.source, write_opts).await + Ok(sst_info) } } diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs index 9cf45cdf9089..e97030ac383c 100644 --- a/src/mito2/src/compaction/twcs.rs +++ b/src/mito2/src/compaction/twcs.rs @@ -306,6 +306,7 @@ impl TwcsCompactionTask { let metadata = self.metadata.clone(); let sst_layer = self.sst_layer.clone(); let region_id = self.region_id; + let file_id = output.output_file_id; let cache_manager = self.cache_manager.clone(); let storage = self.storage.clone(); futs.push(async move { @@ -314,7 +315,7 @@ impl TwcsCompactionTask { let file_meta_opt = sst_layer .write_sst( SstWriteRequest { - file_id: output.output_file_id, + file_id, metadata, source: Source::Reader(reader), cache_manager, @@ -325,7 +326,7 @@ impl TwcsCompactionTask { .await? .map(|sst_info| FileMeta { region_id, - file_id: output.output_file_id, + file_id, time_range: sst_info.time_range, level: output.output_level, file_size: sst_info.file_size, diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs index 381fd3b8c837..10a1de0e5de2 100644 --- a/src/mito2/src/flush.rs +++ b/src/mito2/src/flush.rs @@ -331,6 +331,7 @@ impl RegionFlushTask { // No data written. continue; }; + flushed_bytes += sst_info.file_size; let file_meta = FileMeta { region_id: self.region_id, diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs index 5b6a088729c5..fe328f16b121 100644 --- a/src/mito2/src/sst/parquet.rs +++ b/src/mito2/src/sst/parquet.rs @@ -22,6 +22,8 @@ pub mod row_group; mod stats; pub mod writer; +use std::sync::Arc; + use common_base::readable_size::ReadableSize; use parquet::file::metadata::ParquetMetaData; @@ -62,7 +64,7 @@ pub struct SstInfo { /// Number of rows. pub num_rows: usize, /// File Meta Data - pub file_metadata: Option<ParquetMetaData>, + pub file_metadata: Option<Arc<ParquetMetaData>>, } #[cfg(test)] diff --git a/src/mito2/src/sst/parquet/writer.rs b/src/mito2/src/sst/parquet/writer.rs index 5d8392b6d58d..2ed226791dca 100644 --- a/src/mito2/src/sst/parquet/writer.rs +++ b/src/mito2/src/sst/parquet/writer.rs @@ -14,6 +14,8 @@ //! Parquet writer. 
+use std::sync::Arc; + use common_datasource::file_format::parquet::BufferedWriter; use common_telemetry::debug; use common_time::Timestamp; @@ -121,7 +123,7 @@ impl ParquetWriter { time_range, file_size, num_rows: stats.num_rows, - file_metadata: Some(parquet_metadata), + file_metadata: Some(Arc::new(parquet_metadata)), })) }
type: feat
masked_commit_message: add parquet metadata to cache (#3097)
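The diff in this record wraps the freshly written Parquet footer metadata in an Arc and puts it into a cache keyed by region id and file id, so later reads can skip re-parsing the footer. Below is a minimal, std-only Rust sketch of that shape; MetaCache, RegionId, FileId and Metadata are hypothetical stand-ins, not the mito2 types from the diff.

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    type RegionId = u64; // hypothetical stand-in for the real region id type
    type FileId = u64;   // hypothetical stand-in for the real file id type

    /// Placeholder for parsed file metadata (e.g. a Parquet footer).
    #[derive(Debug)]
    struct Metadata {
        num_rows: usize,
    }

    /// Minimal cache keyed by (region, file); the real cache also tracks sizes and eviction.
    #[derive(Default)]
    struct MetaCache {
        inner: Mutex<HashMap<(RegionId, FileId), Arc<Metadata>>>,
    }

    impl MetaCache {
        /// Writer path: store metadata right after the file is written.
        fn put(&self, region: RegionId, file: FileId, meta: Arc<Metadata>) {
            self.inner.lock().unwrap().insert((region, file), meta);
        }

        /// Read path: a cheap Arc clone instead of re-reading the footer.
        fn get(&self, region: RegionId, file: FileId) -> Option<Arc<Metadata>> {
            self.inner.lock().unwrap().get(&(region, file)).cloned()
        }
    }

    fn main() {
        let cache = MetaCache::default();
        let meta = Arc::new(Metadata { num_rows: 1024 });
        cache.put(1, 42, meta.clone());
        assert_eq!(cache.get(1, 42).unwrap().num_rows, 1024);
    }

Sharing Arc<Metadata> keeps the put cheap for the writer and the get cheap for every reader, which is why the diff changes SstInfo::file_metadata to Option<Arc<ParquetMetaData>>.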
hash: 1f57c6b1f0a7c5260a2c0523cdf4eec2ee252880
date: 2023-11-08 13:08:04
author: Yingwen
commit_message: feat(mito): Add metrics to read path (#2701)
is_merge: false
git_diff:
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs index f1f53315012a..5b6dd7958564 100644 --- a/src/mito2/src/memtable/time_series.rs +++ b/src/mito2/src/memtable/time_series.rs @@ -17,9 +17,10 @@ use std::collections::{BTreeMap, Bound, HashSet}; use std::fmt::{Debug, Formatter}; use std::sync::atomic::{AtomicI64, AtomicU32, Ordering}; use std::sync::{Arc, RwLock}; +use std::time::{Duration, Instant}; use api::v1::OpType; -use common_telemetry::{debug, error}; +use common_telemetry::{debug, error, trace}; use common_time::Timestamp; use datafusion::physical_plan::PhysicalExpr; use datafusion_common::ScalarValue; @@ -47,6 +48,7 @@ use crate::memtable::{ AllocTracker, BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRef, MemtableStats, }; +use crate::metrics::{READ_ROWS_TOTAL, READ_STAGE_ELAPSED}; use crate::read::{Batch, BatchBuilder, BatchColumn}; use crate::row_converter::{McmpRowCodec, RowCodec, SortField}; @@ -319,6 +321,7 @@ impl SeriesSet { pk_schema: primary_key_schema, primary_key_builders, codec: self.codec.clone(), + metrics: Metrics::default(), } } } @@ -346,6 +349,21 @@ fn primary_key_builders( (builders, Arc::new(arrow::datatypes::Schema::new(fields))) } +/// Metrics for reading the memtable. +#[derive(Debug, Default)] +struct Metrics { + /// Total series in the memtable. + total_series: usize, + /// Number of series pruned. + num_pruned_series: usize, + /// Number of rows read. + num_rows: usize, + /// Number of batch read. + num_batches: usize, + /// Duration to scan the memtable. + scan_cost: Duration, +} + struct Iter { metadata: RegionMetadataRef, series: Arc<SeriesRwLockMap>, @@ -355,12 +373,30 @@ struct Iter { pk_schema: arrow::datatypes::SchemaRef, primary_key_builders: Vec<Box<dyn MutableVector>>, codec: Arc<McmpRowCodec>, + metrics: Metrics, +} + +impl Drop for Iter { + fn drop(&mut self) { + debug!( + "Iter {} time series memtable, metrics: {:?}", + self.metadata.region_id, self.metrics + ); + + READ_ROWS_TOTAL + .with_label_values(&["time_series_memtable"]) + .inc_by(self.metrics.num_rows as u64); + READ_STAGE_ELAPSED + .with_label_values(&["scan_memtable"]) + .observe(self.metrics.scan_cost.as_secs_f64()); + } } impl Iterator for Iter { type Item = Result<Batch>; fn next(&mut self) -> Option<Self::Item> { + let start = Instant::now(); let map = self.series.read().unwrap(); let range = match &self.last_key { None => map.range::<Vec<u8>, _>(..), @@ -371,7 +407,10 @@ impl Iterator for Iter { // TODO(hl): maybe yield more than one time series to amortize range overhead. for (primary_key, series) in range { + self.metrics.total_series += 1; + let mut series = series.write().unwrap(); + let start = Instant::now(); if !self.predicate.is_empty() && !prune_primary_key( &self.codec, @@ -383,15 +422,23 @@ impl Iterator for Iter { ) { // read next series + self.metrics.num_pruned_series += 1; continue; } self.last_key = Some(primary_key.clone()); let values = series.compact(&self.metadata); - return Some( - values.and_then(|v| v.to_batch(primary_key, &self.metadata, &self.projection)), - ); + let batch = + values.and_then(|v| v.to_batch(primary_key, &self.metadata, &self.projection)); + + // Update metrics. 
+ self.metrics.num_batches += 1; + self.metrics.num_rows += batch.as_ref().map(|b| b.num_rows()).unwrap_or(0); + self.metrics.scan_cost += start.elapsed(); + return Some(batch); } + self.metrics.scan_cost += start.elapsed(); + None } } @@ -410,12 +457,7 @@ fn prune_primary_key( } if let Some(rb) = series.pk_cache.as_ref() { - let res = prune_inner(predicate, rb).unwrap_or(true); - debug!( - "Prune primary key: {:?}, predicate: {:?}, res: {:?}", - rb, predicate, res - ); - res + prune_inner(predicate, rb).unwrap_or(true) } else { let rb = match pk_to_record_batch(codec, pk, builders, pk_schema) { Ok(rb) => rb, @@ -425,7 +467,6 @@ fn prune_primary_key( } }; let res = prune_inner(predicate, &rb).unwrap_or(true); - debug!("Prune primary key: {:?}, res: {:?}", rb, res); series.update_pk_cache(rb); res } @@ -452,9 +493,11 @@ fn prune_inner(predicates: &[Arc<dyn PhysicalExpr>], primary_key: &RecordBatch) unreachable!("Unexpected primary key record batch evaluation result: {:?}, primary key: {:?}", eva, primary_key); } }; - debug!( + trace!( "Evaluate primary key {:?} against filter: {:?}, result: {:?}", - primary_key, expr, result + primary_key, + expr, + result ); if !result { return Ok(false); diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs index 257bb537e50d..7b8aa475da93 100644 --- a/src/mito2/src/metrics.rs +++ b/src/mito2/src/metrics.rs @@ -105,4 +105,21 @@ lazy_static! { /// Counter of failed compaction task. pub static ref COMPACTION_FAILURE_COUNT: IntCounter = register_int_counter!("mito_compaction_failure_total", "mito compaction failure total").unwrap(); + // ------- End of compaction metrics. + + // Query metrics. + /// Timer of different stages in query. + pub static ref READ_STAGE_ELAPSED: HistogramVec = register_histogram_vec!( + "mito_read_stage_elapsed", + "mito read stage elapsed", + &[STAGE_LABEL] + ) + .unwrap(); + /// Counter of rows read. + pub static ref READ_ROWS_TOTAL: IntCounterVec = + register_int_counter_vec!("mito_read_rows_total", "mito read rows total", &[TYPE_LABEL]).unwrap(); + /// Counter of filtered rows during merge. + pub static ref MERGE_FILTER_ROWS_TOTAL: IntCounterVec = + register_int_counter_vec!("mito_merge_filter_rows_total", "mito merge filter rows total", &[TYPE_LABEL]).unwrap(); + // ------- End of query metrics. } diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs index 187449c01694..ad1f51cf1dc3 100644 --- a/src/mito2/src/read/merge.rs +++ b/src/mito2/src/read/merge.rs @@ -17,12 +17,15 @@ use std::cmp::Ordering; use std::collections::BinaryHeap; use std::mem; +use std::time::{Duration, Instant}; use async_trait::async_trait; +use common_telemetry::debug; use common_time::Timestamp; use crate::error::Result; use crate::memtable::BoxedBatchIterator; +use crate::metrics::{MERGE_FILTER_ROWS_TOTAL, READ_STAGE_ELAPSED}; use crate::read::{Batch, BatchReader, BoxedBatchReader, Source}; /// Minimum batch size to output. @@ -51,11 +54,14 @@ pub struct MergeReader { /// Suggested size of each batch. The batch returned by the reader can have more rows than the /// batch size. batch_size: usize, + /// Local metrics. + metrics: Metrics, } #[async_trait] impl BatchReader for MergeReader { async fn next_batch(&mut self) -> Result<Option<Batch>> { + let start = Instant::now(); while !self.hot.is_empty() && self.batch_merger.num_rows() < self.batch_size { if let Some(current_key) = self.batch_merger.primary_key() { // If the hottest node has a different key, we have finish collecting current key. 
@@ -68,28 +74,55 @@ impl BatchReader for MergeReader { if self.hot.len() == 1 { // No need to do merge sort if only one batch in the hot heap. self.fetch_batch_from_hottest().await?; + self.metrics.num_fetch_by_batches += 1; } else { // We could only fetch rows that less than the next node from the hottest node. self.fetch_rows_from_hottest().await?; + self.metrics.num_fetch_by_rows += 1; } } if self.batch_merger.is_empty() { // Nothing fetched. + self.metrics.scan_cost += start.elapsed(); + // Update deleted rows num. + self.metrics.num_deleted_rows = self.batch_merger.num_deleted_rows(); Ok(None) } else { - self.batch_merger.merge_batches() + let batch = self.batch_merger.merge_batches()?; + self.metrics.scan_cost += start.elapsed(); + self.metrics.num_output_rows += batch.as_ref().map(|b| b.num_rows()).unwrap_or(0); + Ok(batch) } } } +impl Drop for MergeReader { + fn drop(&mut self) { + debug!("Merge reader finished, metrics: {:?}", self.metrics); + + MERGE_FILTER_ROWS_TOTAL + .with_label_values(&["dedup"]) + .inc_by(self.metrics.num_duplicate_rows as u64); + MERGE_FILTER_ROWS_TOTAL + .with_label_values(&["delete"]) + .inc_by(self.metrics.num_deleted_rows as u64); + READ_STAGE_ELAPSED + .with_label_values(&["merge"]) + .observe(self.metrics.scan_cost.as_secs_f64()); + } +} + impl MergeReader { /// Creates and initializes a new [MergeReader]. pub async fn new(sources: Vec<Source>, batch_size: usize) -> Result<MergeReader> { + let start = Instant::now(); + let mut metrics = Metrics::default(); + let mut cold = BinaryHeap::with_capacity(sources.len()); let hot = BinaryHeap::with_capacity(sources.len()); for source in sources { - let node = Node::new(source).await?; + let node = Node::new(source, &mut metrics).await?; if !node.is_eof() { // Ensure `cold` don't have eof nodes. cold.push(node); @@ -101,10 +134,12 @@ impl MergeReader { cold, batch_merger: BatchMerger::new(), batch_size, + metrics, }; // Initializes the reader. reader.refill_hot(); + reader.metrics.scan_cost += start.elapsed(); Ok(reader) } @@ -132,7 +167,7 @@ impl MergeReader { assert_eq!(1, self.hot.len()); let mut hottest = self.hot.pop().unwrap(); - let batch = hottest.fetch_batch().await?; + let batch = hottest.fetch_batch(&mut self.metrics).await?; self.batch_merger.push(batch)?; self.reheap(hottest) } @@ -161,12 +196,12 @@ impl MergeReader { // value directly. match timestamps.binary_search(&next_min_ts.value()) { Ok(pos) => { - // They have duplicate timestamps. Outputs timestamps before the duplciated timestamp. + // They have duplicate timestamps. Outputs timestamps before the duplicated timestamp. // Batch itself doesn't contain duplicate timestamps so timestamps before `pos` // must be less than `next_min_ts`. self.batch_merger.push(top.slice(0, pos))?; // This keep the duplicate timestamp in the node. - top_node.skip_rows(pos).await?; + top_node.skip_rows(pos, &mut self.metrics).await?; // The merge window should contain this timestamp so only nodes in the hot heap // have this timestamp. self.filter_first_duplicate_timestamp_in_hot(top_node, next_min_ts) @@ -175,7 +210,7 @@ impl MergeReader { Err(pos) => { // No duplicate timestamp. Outputs timestamp before `pos`. self.batch_merger.push(top.slice(0, pos))?; - top_node.skip_rows(pos).await?; + top_node.skip_rows(pos, &mut self.metrics).await?; self.reheap(top_node)?; } } @@ -211,16 +246,18 @@ impl MergeReader { if max_seq < next_first_seq { // The next node has larger seq. 
- max_seq_node.skip_rows(1).await?; + max_seq_node.skip_rows(1, &mut self.metrics).await?; + self.metrics.num_duplicate_rows += 1; if !max_seq_node.is_eof() { self.cold.push(max_seq_node); } max_seq_node = next_node; max_seq = next_first_seq; } else { - next_node.skip_rows(1).await?; + next_node.skip_rows(1, &mut self.metrics).await?; + self.metrics.num_duplicate_rows += 1; if !next_node.is_eof() { - // If the next node is + // If the next node has smaller seq, skip that row. self.cold.push(next_node); } } @@ -315,12 +352,33 @@ impl Default for MergeReaderBuilder { } } +/// Metrics for the merge reader. +#[derive(Debug, Default)] +struct Metrics { + /// Total scan cost of the reader. + scan_cost: Duration, + /// Number of times to fetch batches. + num_fetch_by_batches: usize, + /// Number of times to fetch rows. + num_fetch_by_rows: usize, + /// Number of input rows. + num_input_rows: usize, + /// Number of skipped duplicate rows. + num_duplicate_rows: usize, + /// Number of output rows. + num_output_rows: usize, + /// Number of deleted rows. + num_deleted_rows: usize, +} + /// Helper to collect and merge small batches for same primary key. struct BatchMerger { /// Buffered non-empty batches to merge. batches: Vec<Batch>, /// Number of rows in the batch. num_rows: usize, + /// Number of rows deleted. + num_deleted_rows: usize, } impl BatchMerger { @@ -329,6 +387,7 @@ impl BatchMerger { BatchMerger { batches: Vec::new(), num_rows: 0, + num_deleted_rows: 0, } } @@ -337,6 +396,11 @@ impl BatchMerger { self.num_rows } + /// Returns the number of rows deleted. + fn num_deleted_rows(&self) -> usize { + self.num_deleted_rows + } + /// Returns true if the merger is empty. fn is_empty(&self) -> bool { self.num_rows() == 0 @@ -360,7 +424,9 @@ impl BatchMerger { .map(|b| b.primary_key() == batch.primary_key()) .unwrap_or(true)); + let num_rows = batch.num_rows(); batch.filter_deleted()?; + self.num_deleted_rows += num_rows - batch.num_rows(); if batch.is_empty() { return Ok(()); } @@ -402,9 +468,11 @@ impl Node { /// Initialize a node. /// /// It tries to fetch one batch from the `source`. - async fn new(mut source: Source) -> Result<Node> { + async fn new(mut source: Source, metrics: &mut Metrics) -> Result<Node> { // Ensures batch is not empty. let current_batch = source.next_batch().await?.map(CompareFirst); + metrics.num_input_rows += current_batch.as_ref().map(|b| b.0.num_rows()).unwrap_or(0); + Ok(Node { source, current_batch, @@ -437,10 +505,15 @@ impl Node { /// /// # Panics /// Panics if the node has reached EOF. - async fn fetch_batch(&mut self) -> Result<Batch> { + async fn fetch_batch(&mut self, metrics: &mut Metrics) -> Result<Batch> { let current = self.current_batch.take().unwrap(); // Ensures batch is not empty. self.current_batch = self.source.next_batch().await?.map(CompareFirst); + metrics.num_input_rows += self + .current_batch + .as_ref() + .map(|b| b.0.num_rows()) + .unwrap_or(0); Ok(current.0) } @@ -468,13 +541,14 @@ impl Node { /// /// # Panics /// Panics if the node is EOF. - async fn skip_rows(&mut self, num_to_skip: usize) -> Result<()> { + async fn skip_rows(&mut self, num_to_skip: usize, metrics: &mut Metrics) -> Result<()> { let batch = self.current_batch(); debug_assert!(batch.num_rows() >= num_to_skip); + let remaining = batch.num_rows() - num_to_skip; if remaining == 0 { // Nothing remains, we need to fetch next batch to ensure the batch is not empty. 
- self.fetch_batch().await?; + self.fetch_batch(metrics).await?; } else { debug_assert!(!batch.is_empty()); self.current_batch = Some(CompareFirst(batch.slice(num_to_skip, remaining))); @@ -610,6 +684,10 @@ mod tests { ], ) .await; + + assert_eq!(8, reader.metrics.num_input_rows); + assert_eq!(6, reader.metrics.num_output_rows); + assert_eq!(2, reader.metrics.num_deleted_rows); } #[tokio::test] @@ -722,6 +800,11 @@ mod tests { ], ) .await; + + assert_eq!(11, reader.metrics.num_input_rows); + assert_eq!(7, reader.metrics.num_output_rows); + assert_eq!(2, reader.metrics.num_deleted_rows); + assert_eq!(2, reader.metrics.num_duplicate_rows); } #[tokio::test] @@ -1051,6 +1134,11 @@ mod tests { .push(new_batch(b"k1", &[2], &[10], &[OpType::Put], &[22])) .unwrap(); assert_eq!(2, merger.num_rows()); + merger + .push(new_batch(b"k1", &[3], &[10], &[OpType::Delete], &[23])) + .unwrap(); + assert_eq!(2, merger.num_rows()); + let batch = merger.merge_batches().unwrap().unwrap(); assert_eq!(2, batch.num_rows()); assert_eq!( @@ -1064,5 +1152,6 @@ mod tests { ) ); assert!(merger.is_empty()); + assert_eq!(1, merger.num_deleted_rows()); } } diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs index 805e8d8df9e5..9e797e23cc0c 100644 --- a/src/mito2/src/read/seq_scan.rs +++ b/src/mito2/src/read/seq_scan.rs @@ -15,19 +15,22 @@ //! Sequential scan. use std::sync::Arc; +use std::time::{Duration, Instant}; use async_stream::try_stream; use common_error::ext::BoxedError; use common_recordbatch::error::ExternalSnafu; -use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream}; +use common_recordbatch::{RecordBatch, RecordBatchStreamAdaptor, SendableRecordBatchStream}; +use common_telemetry::debug; use common_time::range::TimestampRange; use snafu::ResultExt; use table::predicate::Predicate; use crate::access_layer::AccessLayerRef; -use crate::cache::CacheManagerRef; +use crate::cache::{CacheManager, CacheManagerRef}; use crate::error::Result; use crate::memtable::MemtableRef; +use crate::metrics::READ_STAGE_ELAPSED; use crate::read::compat::{self, CompatReader}; use crate::read::merge::MergeReaderBuilder; use crate::read::projection::ProjectionMapper; @@ -105,22 +108,27 @@ impl SeqScan { /// Builds a stream for the query. pub async fn build_stream(&self) -> Result<SendableRecordBatchStream> { + let start = Instant::now(); // Scans all memtables and SSTs. Builds a merge reader to merge results. let mut reader = self.build_reader().await?; + let mut metrics = Metrics { + scan_cost: start.elapsed(), + }; // Creates a stream to poll the batch reader and convert batch into record batch. let mapper = self.mapper.clone(); let cache_manager = self.cache_manager.clone(); let stream = try_stream! { let cache = cache_manager.as_ref().map(|cache| cache.as_ref()); - while let Some(batch) = reader - .next_batch() - .await - .map_err(BoxedError::new) - .context(ExternalSnafu)? + while let Some(batch) = + Self::fetch_record_batch(&mut reader, &mapper, cache, &mut metrics).await? { - yield mapper.convert(&batch, cache)?; + yield batch; } + + debug!("Seq scan finished, region_id: {:?}, metrics: {:?}", mapper.metadata().region_id, metrics); + // Update metrics. + READ_STAGE_ELAPSED.with_label_values(&["total"]).observe(metrics.scan_cost.as_secs_f64()); }; let stream = Box::pin(RecordBatchStreamAdaptor::new( self.mapper.output_schema(), @@ -160,6 +168,39 @@ impl SeqScan { } Ok(Box::new(builder.build().await?)) } + + /// Fetch a batch from the reader and convert it into a record batch. 
+ async fn fetch_record_batch( + reader: &mut dyn BatchReader, + mapper: &ProjectionMapper, + cache: Option<&CacheManager>, + metrics: &mut Metrics, + ) -> common_recordbatch::error::Result<Option<RecordBatch>> { + let start = Instant::now(); + + let Some(batch) = reader + .next_batch() + .await + .map_err(BoxedError::new) + .context(ExternalSnafu)? + else { + metrics.scan_cost += start.elapsed(); + + return Ok(None); + }; + + let record_batch = mapper.convert(&batch, cache)?; + metrics.scan_cost += start.elapsed(); + + Ok(Some(record_batch)) + } +} + +/// Metrics for [SeqScan]. +#[derive(Debug, Default)] +struct Metrics { + /// Duration to scan data. + scan_cost: Duration, } #[cfg(test)] diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs index 254ecab1c119..a56e140ec033 100644 --- a/src/mito2/src/sst/parquet/reader.rs +++ b/src/mito2/src/sst/parquet/reader.rs @@ -16,9 +16,11 @@ use std::collections::{HashSet, VecDeque}; use std::sync::Arc; +use std::time::{Duration, Instant}; use async_compat::{Compat, CompatExt}; use async_trait::async_trait; +use common_telemetry::debug; use common_time::range::TimestampRange; use datatypes::arrow::record_batch::RecordBatch; use object_store::{ObjectStore, Reader}; @@ -38,6 +40,7 @@ use crate::error::{ ArrowReaderSnafu, InvalidMetadataSnafu, InvalidParquetSnafu, OpenDalSnafu, ReadParquetSnafu, Result, }; +use crate::metrics::{READ_ROWS_TOTAL, READ_STAGE_ELAPSED}; use crate::read::{Batch, BatchReader}; use crate::sst::file::FileHandle; use crate::sst::parquet::format::ReadFormat; @@ -112,6 +115,8 @@ impl ParquetReaderBuilder { /// /// This needs to perform IO operation. pub async fn build(&self) -> Result<ParquetReader> { + let start = Instant::now(); + let file_path = self.file_handle.file_path(&self.file_dir); // Now we create a reader to read the whole file. let reader = self @@ -172,6 +177,7 @@ impl ParquetReaderBuilder { .context(ReadParquetSnafu { path: &file_path })?; let reader_builder = RowGroupReaderBuilder { + file_handle: self.file_handle.clone(), file_path, parquet_meta, file_reader: reader, @@ -179,13 +185,19 @@ impl ParquetReaderBuilder { field_levels, }; + let metrics = Metrics { + read_row_groups: row_groups.len(), + build_cost: start.elapsed(), + ..Default::default() + }; + Ok(ParquetReader { - _file_handle: self.file_handle.clone(), row_groups, read_format, reader_builder, current_reader: None, batches: VecDeque::new(), + metrics, }) } @@ -247,8 +259,29 @@ impl ParquetReaderBuilder { } } +/// Parquet reader metrics. +#[derive(Debug, Default)] +struct Metrics { + /// Number of row groups to read. + read_row_groups: usize, + /// Duration to build the parquet reader. + build_cost: Duration, + /// Duration to scan the reader. + scan_cost: Duration, + /// Number of record batches read. + num_record_batches: usize, + /// Number of batches decoded. + num_batches: usize, + /// Number of rows read. + num_rows: usize, +} + /// Builder to build a [ParquetRecordBatchReader] for a row group. struct RowGroupReaderBuilder { + /// SST file to read. + /// + /// Holds the file handle to avoid the file purge purge it. + file_handle: FileHandle, /// Path of the file. file_path: String, /// Metadata of the parquet file. @@ -294,10 +327,6 @@ impl RowGroupReaderBuilder { /// Parquet batch reader to read our SST format. pub struct ParquetReader { - /// SST file to read. - /// - /// Holds the file handle to avoid the file purge purge it. - _file_handle: FileHandle, /// Indices of row groups to read. 
row_groups: VecDeque<usize>, /// Helper to read record batches. @@ -310,24 +339,60 @@ pub struct ParquetReader { current_reader: Option<ParquetRecordBatchReader>, /// Buffered batches to return. batches: VecDeque<Batch>, + /// Local metrics. + metrics: Metrics, } #[async_trait] impl BatchReader for ParquetReader { async fn next_batch(&mut self) -> Result<Option<Batch>> { + let start = Instant::now(); if let Some(batch) = self.batches.pop_front() { + self.metrics.scan_cost += start.elapsed(); + self.metrics.num_rows += batch.num_rows(); return Ok(Some(batch)); } // We need to fetch next record batch and convert it to batches. let Some(record_batch) = self.fetch_next_record_batch().await? else { + self.metrics.scan_cost += start.elapsed(); return Ok(None); }; + self.metrics.num_record_batches += 1; self.read_format .convert_record_batch(&record_batch, &mut self.batches)?; + self.metrics.num_batches += self.batches.len(); + + let batch = self.batches.pop_front(); + self.metrics.scan_cost += start.elapsed(); + self.metrics.num_rows += batch.as_ref().map(|b| b.num_rows()).unwrap_or(0); + Ok(batch) + } +} - Ok(self.batches.pop_front()) +impl Drop for ParquetReader { + fn drop(&mut self) { + debug!( + "Read parquet {} {}, range: {:?}, {}/{} row groups, metrics: {:?}", + self.reader_builder.file_handle.region_id(), + self.reader_builder.file_handle.file_id(), + self.reader_builder.file_handle.time_range(), + self.metrics.read_row_groups, + self.reader_builder.parquet_meta.num_row_groups(), + self.metrics + ); + + // Report metrics. + READ_STAGE_ELAPSED + .with_label_values(&["build_parquet_reader"]) + .observe(self.metrics.build_cost.as_secs_f64()); + READ_STAGE_ELAPSED + .with_label_values(&["scan_row_groups"]) + .observe(self.metrics.scan_cost.as_secs_f64()); + READ_ROWS_TOTAL + .with_label_values(&["parquet"]) + .inc_by(self.metrics.num_rows as u64); } }
type: feat
masked_commit_message: Add metrics to read path (#2701)
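The pattern in this diff is to accumulate per-reader counters in a plain local struct while iterating and report them once, in Drop, instead of touching shared Prometheus metrics on every row. A small std-only Rust sketch of that pattern, with the Drop handler printing where the real readers call inc_by/observe:

    use std::time::{Duration, Instant};

    #[derive(Debug, Default)]
    struct Metrics {
        num_batches: usize,
        num_rows: usize,
        scan_cost: Duration,
    }

    /// Wraps an iterator and tracks how much work it did; metrics are reported once, on drop.
    struct TrackedIter<I> {
        inner: I,
        metrics: Metrics,
    }

    impl<I: Iterator<Item = Vec<u8>>> Iterator for TrackedIter<I> {
        type Item = Vec<u8>;

        fn next(&mut self) -> Option<Self::Item> {
            let start = Instant::now();
            let item = self.inner.next();
            if let Some(batch) = &item {
                self.metrics.num_batches += 1;
                self.metrics.num_rows += batch.len();
            }
            self.metrics.scan_cost += start.elapsed();
            item
        }
    }

    impl<I> Drop for TrackedIter<I> {
        fn drop(&mut self) {
            // The real readers call inc_by/observe on Prometheus metrics here.
            println!("reader finished, metrics: {:?}", self.metrics);
        }
    }

    fn main() {
        let reader = TrackedIter {
            inner: vec![vec![1u8, 2, 3], vec![4u8]].into_iter(),
            metrics: Metrics::default(),
        };
        for batch in reader {
            println!("batch of {} rows", batch.len());
        }
    }

Updating the shared registry only on drop keeps the hot next() path free of label lookups and atomic increments per row.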
hash: d0b360763397e760b5e9e946422eba93b00eb7e2
date: 2023-08-14 09:49:44
author: Weny Xu
commit_message: feat: add table route manager and upgrade tool (#2145)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock index 15a87e763088..96bb529ff0ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1601,6 +1601,7 @@ dependencies = [ "metrics", "nu-ansi-term", "partition", + "prost", "query", "rand", "rexpect", diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index c3a8f4de8b8e..a458e7494908 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -42,6 +42,7 @@ meta-srv = { workspace = true } metrics.workspace = true nu-ansi-term = "0.46" partition = { workspace = true } +prost.workspace = true query = { workspace = true } rand.workspace = true rustyline = "10.1" diff --git a/src/cmd/src/cli/upgrade.rs b/src/cmd/src/cli/upgrade.rs index 18c203328c62..8e4d02fad8d7 100644 --- a/src/cmd/src/cli/upgrade.rs +++ b/src/cmd/src/cli/upgrade.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use async_trait::async_trait; use clap::Parser; +use client::api::v1::meta::TableRouteValue; use common_meta::error as MetaError; use common_meta::helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue}; use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue}; @@ -24,8 +25,10 @@ use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue}; use common_meta::key::table_info::{TableInfoKey, TableInfoValue}; use common_meta::key::table_name::{TableNameKey, TableNameValue}; use common_meta::key::table_region::{RegionDistribution, TableRegionKey, TableRegionValue}; +use common_meta::key::table_route::{NextTableRouteKey, TableRouteValue as NextTableRouteValue}; use common_meta::key::TableMetaKey; use common_meta::range_stream::PaginationStream; +use common_meta::rpc::router::TableRoute; use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest}; use common_meta::rpc::KeyValue; use common_meta::util::get_prefix_end_key; @@ -34,6 +37,7 @@ use etcd_client::Client; use futures::TryStreamExt; use meta_srv::service::store::etcd::EtcdStore; use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef}; +use prost::Message; use snafu::ResultExt; use crate::cli::{Instance, Tool}; @@ -45,6 +49,15 @@ pub struct UpgradeCommand { etcd_addr: String, #[clap(long)] dryrun: bool, + + #[clap(long)] + skip_table_global_keys: bool, + #[clap(long)] + skip_catalog_keys: bool, + #[clap(long)] + skip_schema_keys: bool, + #[clap(long)] + skip_table_route_keys: bool, } impl UpgradeCommand { @@ -57,6 +70,10 @@ impl UpgradeCommand { let tool = MigrateTableMetadata { etcd_store: EtcdStore::with_etcd_client(client), dryrun: self.dryrun, + skip_catalog_keys: self.skip_catalog_keys, + skip_table_global_keys: self.skip_table_global_keys, + skip_schema_keys: self.skip_schema_keys, + skip_table_route_keys: self.skip_table_route_keys, }; Ok(Instance::Tool(Box::new(tool))) } @@ -65,15 +82,32 @@ impl UpgradeCommand { struct MigrateTableMetadata { etcd_store: KvStoreRef, dryrun: bool, + + skip_table_global_keys: bool, + + skip_catalog_keys: bool, + + skip_schema_keys: bool, + + skip_table_route_keys: bool, } #[async_trait] impl Tool for MigrateTableMetadata { // migrates database's metadata from 0.3 to 0.4. 
async fn do_work(&self) -> Result<()> { - self.migrate_table_global_values().await?; - self.migrate_catalog_keys().await?; - self.migrate_schema_keys().await?; + if !self.skip_table_global_keys { + self.migrate_table_global_values().await?; + } + if !self.skip_catalog_keys { + self.migrate_catalog_keys().await?; + } + if !self.skip_schema_keys { + self.migrate_schema_keys().await?; + } + if !self.skip_table_route_keys { + self.migrate_table_route_keys().await?; + } Ok(()) } } @@ -81,6 +115,62 @@ impl Tool for MigrateTableMetadata { const PAGE_SIZE: usize = 1000; impl MigrateTableMetadata { + async fn migrate_table_route_keys(&self) -> Result<()> { + let key = b"__meta_table_route".to_vec(); + let range_end = get_prefix_end_key(&key); + let mut keys = Vec::new(); + info!("Start scanning key from: {}", String::from_utf8_lossy(&key)); + + let mut stream = PaginationStream::new( + KvBackendAdapter::wrap(self.etcd_store.clone()), + RangeRequest::new().with_range(key, range_end), + PAGE_SIZE, + Arc::new(|kv: KeyValue| { + let value = + TableRouteValue::decode(&kv.value[..]).context(MetaError::DecodeProtoSnafu)?; + Ok((kv.key, value)) + }), + ); + + while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? { + self.migrate_table_route_key(value).await?; + keys.push(key); + } + + info!("Total migrated TableRouteKeys: {}", keys.len()); + self.delete_migrated_keys(keys).await; + + Ok(()) + } + + async fn migrate_table_route_key(&self, value: TableRouteValue) -> Result<()> { + let table_route = TableRoute::try_from_raw( + &value.peers, + value.table_route.expect("expected table_route"), + ) + .unwrap(); + + let new_table_value = NextTableRouteValue::new(table_route.region_routes); + + let new_key = NextTableRouteKey::new(table_route.table.id as u32); + info!("Creating '{new_key}'"); + + if self.dryrun { + info!("Dryrun: do nothing"); + } else { + self.etcd_store + .put( + PutRequest::new() + .with_key(new_key.as_raw_key()) + .with_value(new_table_value.try_as_raw_value().unwrap()), + ) + .await + .unwrap(); + } + + Ok(()) + } + async fn migrate_schema_keys(&self) -> Result<()> { // The schema key prefix. let key = b"__s".to_vec(); diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs index 7492c0d6ef22..ef892a24e14e 100644 --- a/src/common/meta/src/error.rs +++ b/src/common/meta/src/error.rs @@ -24,6 +24,12 @@ use table::metadata::TableId; #[derive(Debug, Snafu)] #[snafu(visibility(pub))] pub enum Error { + #[snafu(display("Failed to decode protobuf, source: {}", source))] + DecodeProto { + location: Location, + source: prost::DecodeError, + }, + #[snafu(display("Failed to encode object into json, source: {}", source))] EncodeJson { location: Location, @@ -164,7 +170,8 @@ impl ErrorExt for Error { EncodeJson { .. } | DecodeJson { .. } | PayloadNotExist { .. } - | ConvertRawKey { .. } => StatusCode::Unexpected, + | ConvertRawKey { .. } + | DecodeProto { .. } => StatusCode::Unexpected, MetaSrv { source, .. } => source.status_code(), diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs index c0ff12b26310..e06122b62f75 100644 --- a/src/common/meta/src/key.rs +++ b/src/common/meta/src/key.rs @@ -53,7 +53,9 @@ pub mod schema_name; pub mod table_info; pub mod table_name; pub mod table_region; -mod table_route; +// TODO(weny): removes it. 
+#[allow(unused)] +pub mod table_route; use std::sync::Arc; @@ -67,6 +69,7 @@ use table_region::{TableRegionKey, TableRegionManager, TableRegionValue}; use self::catalog_name::{CatalogManager, CatalogNameValue}; use self::schema_name::{SchemaManager, SchemaNameValue}; +use self::table_route::TableRouteValue; use crate::error::{InvalidTableMetadataSnafu, Result, SerdeJsonSnafu}; pub use crate::key::table_route::{TableRouteKey, TABLE_ROUTE_PREFIX}; use crate::kv_backend::KvBackendRef; @@ -212,7 +215,8 @@ impl_table_meta_value! { TableNameValue, TableInfoValue, TableRegionValue, - DatanodeTableValue + DatanodeTableValue, + TableRouteValue } #[cfg(test)] diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs index 6bd189d4c1c2..b4a06d0b2c64 100644 --- a/src/common/meta/src/key/table_route.rs +++ b/src/common/meta/src/key/table_route.rs @@ -15,12 +15,161 @@ use std::fmt::Display; use api::v1::meta::TableName; +use serde::{Deserialize, Serialize}; +use snafu::ensure; use table::metadata::TableId; -use crate::key::to_removed_key; +use crate::error::{Result, UnexpectedSnafu}; +use crate::key::{to_removed_key, TableMetaKey}; +use crate::kv_backend::KvBackendRef; +use crate::rpc::router::{RegionRoute, Table, TableRoute}; +use crate::rpc::store::{CompareAndPutRequest, MoveValueRequest}; pub const TABLE_ROUTE_PREFIX: &str = "__meta_table_route"; +pub const NEXT_TABLE_ROUTE_PREFIX: &str = "__table_route"; + +// TODO(weny): Renames it to TableRouteKey. +pub struct NextTableRouteKey { + pub table_id: TableId, +} + +impl NextTableRouteKey { + pub fn new(table_id: TableId) -> Self { + Self { table_id } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +pub struct TableRouteValue { + pub region_routes: Vec<RegionRoute>, + version: u64, +} + +impl TableRouteValue { + pub fn new(region_routes: Vec<RegionRoute>) -> Self { + Self { + region_routes, + version: 0, + } + } +} + +impl TableMetaKey for NextTableRouteKey { + fn as_raw_key(&self) -> Vec<u8> { + self.to_string().into_bytes() + } +} + +impl Display for NextTableRouteKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", NEXT_TABLE_ROUTE_PREFIX, self.table_id) + } +} + +pub struct TableRouteManager { + kv_backend: KvBackendRef, +} + +impl TableRouteManager { + pub fn new(kv_backend: KvBackendRef) -> Self { + Self { kv_backend } + } + + pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> { + let key = NextTableRouteKey::new(table_id); + self.kv_backend + .get(&key.as_raw_key()) + .await? + .map(|kv| TableRouteValue::try_from_raw_value(kv.value)) + .transpose() + } + + // Creates TableRoute key and value. If the key already exists, check whether the value is the same. + pub async fn create(&self, table_id: TableId, region_routes: Vec<RegionRoute>) -> Result<()> { + let key = NextTableRouteKey::new(table_id); + let val = TableRouteValue::new(region_routes); + let req = CompareAndPutRequest::new() + .with_key(key.as_raw_key()) + .with_value(val.try_as_raw_value()?); + + self.kv_backend.compare_and_put(req).await?.handle(|resp| { + if !resp.success { + let Some(cur) = resp + .prev_kv + .map(|kv|TableRouteValue::try_from_raw_value(kv.value)) + .transpose()? 
+ else { + return UnexpectedSnafu { + err_msg: format!("compare_and_put expect None but failed with current value None, key: {key}, val: {val:?}"), + }.fail(); + }; + + ensure!( + cur==val, + UnexpectedSnafu { + err_msg: format!("current value '{cur:?}' already existed for key '{key}', {val:?} is not set"), + } + ); + } + Ok(()) + }) + } + + /// Compares and puts value of key. `expect` is the expected value, if backend's current value associated + /// with key is the same as `expect`, the value will be updated to `val`. + /// + /// - If the compare-and-set operation successfully updated value, this method will return an `Ok(Ok())` + /// - If associated value is not the same as `expect`, no value will be updated and an + /// `Ok(Err(Option<TableRoute>))` will be returned. The `Option<TableRoute>` indicates + /// the current associated value of key. + /// - If any error happens during operation, an `Err(Error)` will be returned. + pub async fn compare_and_put( + &self, + table_id: TableId, + expect: Option<TableRouteValue>, + region_routes: Vec<RegionRoute>, + ) -> Result<std::result::Result<(), Option<TableRouteValue>>> { + let key = NextTableRouteKey::new(table_id); + let raw_key = key.as_raw_key(); + + let (expect, version) = if let Some(x) = expect { + (x.try_as_raw_value()?, x.version + 1) + } else { + (vec![], 0) + }; + let value = TableRouteValue { + region_routes, + version, + }; + let raw_value = value.try_as_raw_value()?; + + let req = CompareAndPutRequest::new() + .with_key(raw_key) + .with_expect(expect) + .with_value(raw_value); + + self.kv_backend.compare_and_put(req).await?.handle(|resp| { + Ok(if resp.success { + Ok(()) + } else { + Err(resp + .prev_kv + .map(|x| TableRouteValue::try_from_raw_value(x.value)) + .transpose()?) + }) + }) + } + + pub async fn remove(&self, table_id: TableId) -> Result<()> { + let key = NextTableRouteKey::new(table_id).as_raw_key(); + let removed_key = to_removed_key(&String::from_utf8_lossy(&key)); + let req = MoveValueRequest::new(key, removed_key.as_bytes()); + self.kv_backend.move_value(req).await?; + Ok(()) + } +} + #[derive(Copy, Clone)] pub struct TableRouteKey<'a> { pub table_id: TableId, @@ -59,9 +208,51 @@ impl<'a> Display for TableRouteKey<'a> { #[cfg(test)] mod tests { - use api::v1::meta::TableName; + use std::sync::Arc; + + use api::v1::meta::TableName as PbTableName; use super::TableRouteKey; + use crate::key::table_route::{TableRouteManager, TableRouteValue}; + use crate::kv_backend::memory::MemoryKvBackend; + use crate::rpc::router::{RegionRoute, Table, TableRoute}; + use crate::table_name::TableName; + + #[tokio::test] + async fn test_table_route_manager() { + let mgr = TableRouteManager::new(Arc::new(MemoryKvBackend::default())); + + let table_id = 1024u32; + let table = Table { + id: table_id as u64, + table_name: TableName::new("foo", "bar", "baz"), + table_schema: b"mock schema".to_vec(), + }; + let region_route = RegionRoute::default(); + let region_routes = vec![region_route]; + + mgr.create(table_id, region_routes.clone()).await.unwrap(); + + let got = mgr.get(1024).await.unwrap().unwrap(); + + assert_eq!(got.region_routes, region_routes); + + let empty = mgr.get(1023).await.unwrap(); + assert!(empty.is_none()); + + let expect = TableRouteValue::new(region_routes); + + let mut updated = expect.clone(); + updated.region_routes.push(RegionRoute::default()); + + mgr.compare_and_put(1024, Some(expect.clone()), updated.region_routes.clone()) + .await + .unwrap(); + + mgr.compare_and_put(1024, Some(expect.clone()), 
updated.region_routes) + .await + .unwrap(); + } #[test] fn test_table_route_key() { @@ -87,7 +278,7 @@ mod tests { #[test] fn test_with_table_name() { - let table_name = TableName { + let table_name = PbTableName { catalog_name: "greptime".to_string(), schema_name: "public".to_string(), table_name: "demo".to_string(), diff --git a/src/common/meta/src/rpc/router.rs b/src/common/meta/src/rpc/router.rs index a4da3f3e1414..ac5d81501599 100644 --- a/src/common/meta/src/rpc/router.rs +++ b/src/common/meta/src/rpc/router.rs @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use api::v1::meta::{ Partition as PbPartition, Peer as PbPeer, Region as PbRegion, RegionRoute as PbRegionRoute, RouteRequest as PbRouteRequest, RouteResponse as PbRouteResponse, Table as PbTable, TableId as PbTableId, TableRoute as PbTableRoute, TableRouteValue as PbTableRouteValue, }; -use serde::{Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use snafu::OptionExt; use store_api::storage::{RegionId, RegionNumber}; use table::metadata::TableId; @@ -85,7 +85,6 @@ impl TableRoute { .iter() .map(|x| (x.region.id.region_number(), x.leader_peer.clone())) .collect::<HashMap<_, _>>(); - Self { table, region_routes, @@ -237,7 +236,7 @@ impl TryFrom<PbTableRouteValue> for TableRoute { pub struct Table { pub id: u64, pub table_name: TableName, - #[serde(serialize_with = "as_utf8")] + #[serde(serialize_with = "as_utf8", deserialize_with = "from_utf8")] pub table_schema: Vec<u8>, } @@ -281,7 +280,7 @@ pub struct Region { pub id: RegionId, pub name: String, pub partition: Option<Partition>, - pub attrs: HashMap<String, String>, + pub attrs: BTreeMap<String, String>, } impl From<PbRegion> for Region { @@ -290,7 +289,7 @@ impl From<PbRegion> for Region { id: r.id.into(), name: r.name, partition: r.partition.map(Into::into), - attrs: r.attrs, + attrs: r.attrs.into_iter().collect::<BTreeMap<_, _>>(), } } } @@ -301,7 +300,7 @@ impl From<Region> for PbRegion { id: region.id.into(), name: region.name, partition: region.partition.map(Into::into), - attrs: region.attrs, + attrs: region.attrs.into_iter().collect::<HashMap<_, _>>(), } } } @@ -322,6 +321,15 @@ fn as_utf8<S: Serializer>(val: &[u8], serializer: S) -> std::result::Result<S::O ) } +pub fn from_utf8<'de, D>(deserializer: D) -> std::result::Result<Vec<u8>, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + + Ok(s.into_bytes()) +} + fn as_utf8_vec<S: Serializer>( val: &[Vec<u8>], serializer: S, @@ -513,7 +521,7 @@ mod tests { id: 1.into(), name: "r1".to_string(), partition: None, - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: Some(Peer::new(2, "a2")), follower_peers: vec![Peer::new(1, "a1"), Peer::new(3, "a3")], @@ -523,7 +531,7 @@ mod tests { id: 2.into(), name: "r2".to_string(), partition: None, - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: Some(Peer::new(1, "a1")), follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")], diff --git a/src/common/meta/src/rpc/store.rs b/src/common/meta/src/rpc/store.rs index e69a2f87bdd7..2426442e3f5d 100644 --- a/src/common/meta/src/rpc/store.rs +++ b/src/common/meta/src/rpc/store.rs @@ -612,9 +612,9 @@ impl TryFrom<PbCompareAndPutResponse> for CompareAndPutResponse { } impl CompareAndPutResponse { - pub fn handle<R, E, F>(&self, f: F) -> 
std::result::Result<R, E> + pub fn handle<R, E, F>(self, f: F) -> std::result::Result<R, E> where - F: FnOnce(&Self) -> std::result::Result<R, E>, + F: FnOnce(Self) -> std::result::Result<R, E>, { f(self) } diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs index 90e3efe61879..52758ad64ade 100644 --- a/src/frontend/src/table.rs +++ b/src/frontend/src/table.rs @@ -377,7 +377,7 @@ impl PartitionExec { #[cfg(test)] pub(crate) mod test { - use std::collections::HashMap; + use std::collections::BTreeMap; use std::sync::atomic::{AtomicU32, Ordering}; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; @@ -445,7 +445,7 @@ pub(crate) mod test { .try_into() .unwrap(), ), - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: Some(Peer::new(3, "")), follower_peers: vec![], @@ -462,7 +462,7 @@ pub(crate) mod test { .try_into() .unwrap(), ), - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: Some(Peer::new(2, "")), follower_peers: vec![], @@ -479,7 +479,7 @@ pub(crate) mod test { .try_into() .unwrap(), ), - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: Some(Peer::new(1, "")), follower_peers: vec![], @@ -517,7 +517,7 @@ pub(crate) mod test { .try_into() .unwrap(), ), - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: None, follower_peers: vec![], @@ -537,7 +537,7 @@ pub(crate) mod test { .try_into() .unwrap(), ), - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: None, follower_peers: vec![], @@ -554,7 +554,7 @@ pub(crate) mod test { .try_into() .unwrap(), ), - attrs: HashMap::new(), + attrs: BTreeMap::new(), }, leader_peer: None, follower_peers: vec![],
type: feat
masked_commit_message: add table route manager and upgrade tool (#2145)
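The new TableRouteManager in this diff builds its create and compare_and_put operations on a KV backend's compare-and-put, returning the current value on conflict so callers can inspect it and retry. The std-only Rust sketch below illustrates that optimistic-concurrency shape with a hypothetical in-memory store; it is not the common_meta API.

    use std::collections::HashMap;

    /// Hypothetical in-memory KV store with compare-and-put semantics.
    #[derive(Default)]
    struct KvStore {
        data: HashMap<String, String>,
    }

    impl KvStore {
        /// Put `value` only if the current value equals `expect` (None means "key must be absent").
        /// On conflict, return the current value so the caller can decide how to retry.
        fn compare_and_put(
            &mut self,
            key: &str,
            expect: Option<&str>,
            value: String,
        ) -> Result<(), Option<String>> {
            if self.data.get(key).map(|s| s.as_str()) == expect {
                self.data.insert(key.to_string(), value);
                Ok(())
            } else {
                Err(self.data.get(key).cloned())
            }
        }
    }

    fn main() {
        let mut store = KvStore::default();

        // First writer creates the route value.
        assert!(store
            .compare_and_put("__table_route/1024", None, "v0".to_string())
            .is_ok());

        // A stale writer that still expects "absent" loses and gets the current value back.
        let conflict = store
            .compare_and_put("__table_route/1024", None, "v1".to_string())
            .unwrap_err();
        assert_eq!(conflict.as_deref(), Some("v0"));

        // An up-to-date writer passes the value it last observed and succeeds.
        assert!(store
            .compare_and_put("__table_route/1024", Some("v0"), "v1".to_string())
            .is_ok());
    }

The version field carried by TableRouteValue in the diff plays the role of the "value it last observed" here: a successful update bumps it, so stale writers fail the comparison.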
hash: a23f269bb1ac365779a47a9d872b6c9e87740348
date: 2024-12-25 12:56:21
author: Ruihang Xia
commit_message: fix: correct write cache's metric labels (#5227)
is_merge: false
git_diff:
diff --git a/config/config.md b/config/config.md index d3353930b163..db2f6e010286 100644 --- a/config/config.md +++ b/config/config.md @@ -421,7 +421,7 @@ | `storage` | -- | -- | The data storage options. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | -| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. | +| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. | | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. | | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | @@ -460,7 +460,7 @@ | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. | -| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. | +| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. | | `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. | | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. 
| diff --git a/config/datanode.example.toml b/config/datanode.example.toml index 90a4d69b2e89..1b062a4b3af1 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -294,7 +294,7 @@ data_home = "/tmp/greptimedb/" type = "File" ## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance. -## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. +## A local file directory, defaults to `{data_home}`. An empty string means disabling. ## @toml2docs:none-default #+ cache_path = "" @@ -478,7 +478,7 @@ auto_flush_interval = "1h" ## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. enable_experimental_write_cache = false -## File system path for write cache, defaults to `{data_home}/object_cache/write`. +## File system path for write cache, defaults to `{data_home}`. experimental_write_cache_path = "" ## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs index 52a1cba982e1..64cacd4e8583 100644 --- a/src/datanode/src/store.rs +++ b/src/datanode/src/store.rs @@ -28,7 +28,7 @@ use common_telemetry::{info, warn}; use object_store::layers::{LruCacheLayer, RetryInterceptor, RetryLayer}; use object_store::services::Fs; use object_store::util::{join_dir, normalize_dir, with_instrument_layers}; -use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder, OBJECT_CACHE_DIR}; +use object_store::{Access, Error, HttpClient, ObjectStore, ObjectStoreBuilder}; use snafu::prelude::*; use crate::config::{HttpClientConfig, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE}; @@ -147,12 +147,10 @@ async fn build_cache_layer( }; // Enable object cache by default - // Set the cache_path to be `${data_home}/object_cache/read/{name}` by default + // Set the cache_path to be `${data_home}` by default // if it's not present if cache_path.is_none() { - let object_cache_path = join_dir(data_home, OBJECT_CACHE_DIR); - let read_cache_path = join_dir(&object_cache_path, "read"); - let read_cache_path = join_dir(&read_cache_path, &name.to_lowercase()); + let read_cache_path = data_home.to_string(); tokio::fs::create_dir_all(Path::new(&read_cache_path)) .await .context(CreateDirSnafu { diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs index eb112530cad7..51c20f742839 100644 --- a/src/mito2/src/cache/file_cache.rs +++ b/src/mito2/src/cache/file_cache.rs @@ -37,8 +37,10 @@ use crate::sst::file::FileId; use crate::sst::parquet::helper::fetch_byte_ranges; use crate::sst::parquet::metadata::MetadataLoader; -/// Subdirectory of cached files. -const FILE_DIR: &str = "files/"; +/// Subdirectory of cached files for write. +/// +/// This must contain three layers, corresponding to [`build_prometheus_metrics_layer`](object_store::layers::build_prometheus_metrics_layer). +const FILE_DIR: &str = "cache/object/write/"; /// A file cache manages files on local store and evict files based /// on size. 
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs index 8a431f22a63d..fc9972de5305 100644 --- a/src/mito2/src/cache/write_cache.rs +++ b/src/mito2/src/cache/write_cache.rs @@ -20,7 +20,6 @@ use std::time::Duration; use common_base::readable_size::ReadableSize; use common_telemetry::{debug, info}; use futures::AsyncWriteExt; -use object_store::manager::ObjectStoreManagerRef; use object_store::ObjectStore; use snafu::ResultExt; @@ -44,10 +43,6 @@ use crate::sst::{DEFAULT_WRITE_BUFFER_SIZE, DEFAULT_WRITE_CONCURRENCY}; pub struct WriteCache { /// Local file cache. file_cache: FileCacheRef, - /// Object store manager. - #[allow(unused)] - /// TODO: Remove unused after implementing async write cache - object_store_manager: ObjectStoreManagerRef, /// Puffin manager factory for index. puffin_manager_factory: PuffinManagerFactory, /// Intermediate manager for index. @@ -61,7 +56,6 @@ impl WriteCache { /// `object_store_manager` for all object stores. pub async fn new( local_store: ObjectStore, - object_store_manager: ObjectStoreManagerRef, cache_capacity: ReadableSize, ttl: Option<Duration>, puffin_manager_factory: PuffinManagerFactory, @@ -72,7 +66,6 @@ impl WriteCache { Ok(Self { file_cache, - object_store_manager, puffin_manager_factory, intermediate_manager, }) @@ -81,7 +74,6 @@ impl WriteCache { /// Creates a write cache based on local fs. pub async fn new_fs( cache_dir: &str, - object_store_manager: ObjectStoreManagerRef, cache_capacity: ReadableSize, ttl: Option<Duration>, puffin_manager_factory: PuffinManagerFactory, @@ -92,7 +84,6 @@ impl WriteCache { let local_store = new_fs_cache_store(cache_dir).await?; Self::new( local_store, - object_store_manager, cache_capacity, ttl, puffin_manager_factory, diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs index 963089c60aed..7a1574c850ae 100644 --- a/src/mito2/src/config.rs +++ b/src/mito2/src/config.rs @@ -20,8 +20,6 @@ use std::time::Duration; use common_base::readable_size::ReadableSize; use common_telemetry::warn; -use object_store::util::join_dir; -use object_store::OBJECT_CACHE_DIR; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -97,7 +95,7 @@ pub struct MitoConfig { pub selector_result_cache_size: ReadableSize, /// Whether to enable the experimental write cache. pub enable_experimental_write_cache: bool, - /// File system path for write cache, defaults to `{data_home}/object_cache/write`. + /// File system path for write cache dir's root, defaults to `{data_home}`. pub experimental_write_cache_path: String, /// Capacity for write cache. pub experimental_write_cache_size: ReadableSize, @@ -234,8 +232,7 @@ impl MitoConfig { // Sets write cache path if it is empty. 
if self.experimental_write_cache_path.trim().is_empty() { - let object_cache_path = join_dir(data_home, OBJECT_CACHE_DIR); - self.experimental_write_cache_path = join_dir(&object_cache_path, "write"); + self.experimental_write_cache_path = data_home.to_string(); } self.index.sanitize(data_home, &self.inverted_index)?; diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs index 314e886ba9ca..14b4bb4a9109 100644 --- a/src/mito2/src/test_util.rs +++ b/src/mito2/src/test_util.rs @@ -644,16 +644,9 @@ impl TestEnv { .unwrap(); let object_store_manager = self.get_object_store_manager().unwrap(); - let write_cache = WriteCache::new( - local_store, - object_store_manager, - capacity, - None, - puffin_mgr, - intm_mgr, - ) - .await - .unwrap(); + let write_cache = WriteCache::new(local_store, capacity, None, puffin_mgr, intm_mgr) + .await + .unwrap(); Arc::new(write_cache) } diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs index 233ab9f056b1..62ad59f0701a 100644 --- a/src/mito2/src/worker.rs +++ b/src/mito2/src/worker.rs @@ -157,7 +157,6 @@ impl WorkerGroup { let purge_scheduler = Arc::new(LocalScheduler::new(config.max_background_purges)); let write_cache = write_cache_from_config( &config, - object_store_manager.clone(), puffin_manager_factory.clone(), intermediate_manager.clone(), ) @@ -303,7 +302,6 @@ impl WorkerGroup { .with_buffer_size(Some(config.index.write_buffer_size.as_bytes() as _)); let write_cache = write_cache_from_config( &config, - object_store_manager.clone(), puffin_manager_factory.clone(), intermediate_manager.clone(), ) @@ -364,7 +362,6 @@ fn region_id_to_index(id: RegionId, num_workers: usize) -> usize { async fn write_cache_from_config( config: &MitoConfig, - object_store_manager: ObjectStoreManagerRef, puffin_manager_factory: PuffinManagerFactory, intermediate_manager: IntermediateManager, ) -> Result<Option<WriteCacheRef>> { @@ -383,7 +380,6 @@ async fn write_cache_from_config( let cache = WriteCache::new_fs( &config.experimental_write_cache_path, - object_store_manager, config.experimental_write_cache_size, config.experimental_write_cache_ttl, puffin_manager_factory, diff --git a/src/object-store/src/layers.rs b/src/object-store/src/layers.rs index 20108ab63c52..8383fd237952 100644 --- a/src/object-store/src/layers.rs +++ b/src/object-store/src/layers.rs @@ -25,14 +25,14 @@ mod prometheus { static PROMETHEUS_LAYER: OnceLock<Mutex<PrometheusLayer>> = OnceLock::new(); + /// This logical tries to extract parent path from the object storage operation + /// the function also relies on assumption that the region path is built from + /// pattern `<data|index>/catalog/schema/table_id/...` OR `greptimedb/object_cache/<read|write>/...` + /// + /// We'll get the data/catalog/schema from path. pub fn build_prometheus_metrics_layer(with_path_label: bool) -> PrometheusLayer { PROMETHEUS_LAYER .get_or_init(|| { - // This logical tries to extract parent path from the object storage operation - // the function also relies on assumption that the region path is built from - // pattern `<data|index>/catalog/schema/table_id/....` - // - // We'll get the data/catalog/schema from path. 
let path_level = if with_path_label { 3 } else { 0 }; let layer = PrometheusLayer::builder() diff --git a/src/object-store/src/layers/lru_cache.rs b/src/object-store/src/layers/lru_cache.rs index 197a222162be..95e9349452cf 100644 --- a/src/object-store/src/layers/lru_cache.rs +++ b/src/object-store/src/layers/lru_cache.rs @@ -117,9 +117,7 @@ impl<I: Access, C: Access> LayeredAccess for LruCacheAccess<I, C> { async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { let result = self.inner.write(path, args).await; - self.read_cache - .invalidate_entries_with_prefix(format!("{:x}", md5::compute(path))) - .await; + self.read_cache.invalidate_entries_with_prefix(path); result } @@ -127,9 +125,7 @@ impl<I: Access, C: Access> LayeredAccess for LruCacheAccess<I, C> { async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> { let result = self.inner.delete(path, args).await; - self.read_cache - .invalidate_entries_with_prefix(format!("{:x}", md5::compute(path))) - .await; + self.read_cache.invalidate_entries_with_prefix(path); result } @@ -146,8 +142,7 @@ impl<I: Access, C: Access> LayeredAccess for LruCacheAccess<I, C> { fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { let result = self.inner.blocking_write(path, args); - self.read_cache - .blocking_invalidate_entries_with_prefix(format!("{:x}", md5::compute(path))); + self.read_cache.invalidate_entries_with_prefix(path); result } diff --git a/src/object-store/src/layers/lru_cache/read_cache.rs b/src/object-store/src/layers/lru_cache/read_cache.rs index 874b17280d9c..1e3cf61615f5 100644 --- a/src/object-store/src/layers/lru_cache/read_cache.rs +++ b/src/object-store/src/layers/lru_cache/read_cache.rs @@ -20,7 +20,7 @@ use moka::future::Cache; use moka::notification::ListenerFuture; use opendal::raw::oio::{Read, Reader, Write}; use opendal::raw::{Access, OpDelete, OpRead, OpStat, OpWrite, RpRead}; -use opendal::{Error as OpendalError, ErrorKind, Metakey, OperatorBuilder, Result}; +use opendal::{EntryMode, Error as OpendalError, ErrorKind, Metakey, OperatorBuilder, Result}; use crate::metrics::{ OBJECT_STORE_LRU_CACHE_BYTES, OBJECT_STORE_LRU_CACHE_ENTRIES, OBJECT_STORE_LRU_CACHE_HIT, @@ -28,6 +28,10 @@ use crate::metrics::{ }; const RECOVER_CACHE_LIST_CONCURRENT: usize = 8; +/// Subdirectory of cached files for read. +/// +/// This must contain three layers, corresponding to [`build_prometheus_metrics_layer`](object_store::layers::build_prometheus_metrics_layer). +const READ_CACHE_DIR: &str = "cache/object/read"; /// Cache value for read file #[derive(Debug, Clone, PartialEq, Eq, Copy)] @@ -56,12 +60,20 @@ fn can_cache(path: &str) -> bool { /// Generate a unique cache key for the read path and range. fn read_cache_key(path: &str, args: &OpRead) -> String { format!( - "{:x}.cache-{}", + "{READ_CACHE_DIR}/{:x}.cache-{}", md5::compute(path), args.range().to_header() ) } +fn read_cache_root() -> String { + format!("/{READ_CACHE_DIR}/") +} + +fn read_cache_key_prefix(path: &str) -> String { + format!("{READ_CACHE_DIR}/{:x}", md5::compute(path)) +} + /// Local read cache for files in object storage #[derive(Debug)] pub(crate) struct ReadCache<C> { @@ -125,16 +137,9 @@ impl<C: Access> ReadCache<C> { (self.mem_cache.entry_count(), self.mem_cache.weighted_size()) } - /// Invalidate all cache items which key starts with `prefix`. 
- pub(crate) async fn invalidate_entries_with_prefix(&self, prefix: String) { - // Safety: always ok when building cache with `support_invalidation_closures`. - self.mem_cache - .invalidate_entries_if(move |k: &String, &_v| k.starts_with(&prefix)) - .ok(); - } - - /// Blocking version of `invalidate_entries_with_prefix`. - pub(crate) fn blocking_invalidate_entries_with_prefix(&self, prefix: String) { + /// Invalidate all cache items belong to the specific path. + pub(crate) fn invalidate_entries_with_prefix(&self, path: &str) { + let prefix = read_cache_key_prefix(path); // Safety: always ok when building cache with `support_invalidation_closures`. self.mem_cache .invalidate_entries_if(move |k: &String, &_v| k.starts_with(&prefix)) @@ -145,8 +150,9 @@ impl<C: Access> ReadCache<C> { /// Return entry count and total approximate entry size in bytes. pub(crate) async fn recover_cache(&self) -> Result<(u64, u64)> { let op = OperatorBuilder::new(self.file_cache.clone()).finish(); + let root = read_cache_root(); let mut entries = op - .list_with("/") + .list_with(&root) .metakey(Metakey::ContentLength | Metakey::ContentType) .concurrent(RECOVER_CACHE_LIST_CONCURRENT) .await?; @@ -157,7 +163,7 @@ impl<C: Access> ReadCache<C> { OBJECT_STORE_LRU_CACHE_ENTRIES.inc(); OBJECT_STORE_LRU_CACHE_BYTES.add(size as i64); // ignore root path - if entry.path() != "/" { + if entry.metadata().mode() == EntryMode::FILE { self.mem_cache .insert(read_key.to_string(), ReadResult::Success(size as u32)) .await; diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs index 7e81b965fbed..d34fb57ab7d8 100644 --- a/src/object-store/tests/object_store_test.rs +++ b/src/object-store/tests/object_store_test.rs @@ -27,6 +27,9 @@ use opendal::raw::{Access, OpList, OpRead}; use opendal::services::{Azblob, Gcs, Oss}; use opendal::{EntryMode, OperatorBuilder}; +/// Duplicate of the constant in `src/layers/lru_cache/read_cache.rs` +const READ_CACHE_DIR: &str = "cache/object/read"; + async fn test_object_crud(store: &ObjectStore) -> Result<()> { // Create object handler. // Write data info object; @@ -267,7 +270,8 @@ async fn test_file_backend_with_lru_cache() -> Result<()> { async fn assert_lru_cache<C: Access>(cache_layer: &LruCacheLayer<C>, file_names: &[&str]) { for file_name in file_names { - assert!(cache_layer.contains_file(file_name).await, "{file_name}"); + let file_path = format!("{READ_CACHE_DIR}/{file_name}"); + assert!(cache_layer.contains_file(&file_path).await, "{file_path:?}"); } }
fix
correct write cache's metric labels (#5227)
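Illustrative sketch, not taken from the commit above: the diff moves read-cache files under a fixed READ_CACHE_DIR prefix and invalidates by object path instead of a precomputed md5 string. The sketch below mirrors that key scheme with the same md5::compute / {:x} formatting used in read_cache.rs; it assumes the md5 crate is available, and the "bytes=0-1023" range header is a made-up placeholder for whatever args.range().to_header() produces.

const READ_CACHE_DIR: &str = "cache/object/read";

/// Cache key for one (object path, byte range) pair, e.g.
/// "cache/object/read/<md5-of-path>.cache-bytes=0-1023".
fn read_cache_key(path: &str, range_header: &str) -> String {
    format!(
        "{READ_CACHE_DIR}/{:x}.cache-{}",
        md5::compute(path),
        range_header
    )
}

/// Prefix shared by every cached range of the same object; used to invalidate
/// all of its entries when the object is written or deleted.
fn read_cache_key_prefix(path: &str) -> String {
    format!("{READ_CACHE_DIR}/{:x}", md5::compute(path))
}

fn main() {
    let path = "data/catalog/schema/1024/1024_0000000000/example.parquet";
    let key = read_cache_key(path, "bytes=0-1023");
    let prefix = read_cache_key_prefix(path);
    // Every range key of an object starts with its prefix, so prefix-based
    // invalidation on write/delete removes all cached ranges of that object.
    assert!(key.starts_with(&prefix));
    println!("key    = {key}");
    println!("prefix = {prefix}");
}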
f359eeb667251ba0aa546fa19cc6945053fa060a
2025-02-17 10:10:47
Ruihang Xia
feat(log-query): support specifying exclusive/inclusive for between filter (#5546)
false
diff --git a/src/log-query/src/log_query.rs b/src/log-query/src/log_query.rs index 988c9c27a9b4..be867065193c 100644 --- a/src/log-query/src/log_query.rs +++ b/src/log-query/src/log_query.rs @@ -310,7 +310,12 @@ pub enum ContentFilter { // Value-based filters /// Content exists, a.k.a. not null. Exist, - Between(String, String), + Between { + start: String, + end: String, + start_inclusive: bool, + end_inclusive: bool, + }, // TODO(ruihang): arithmetic operations // Compound filters diff --git a/src/query/src/log_query/planner.rs b/src/query/src/log_query/planner.rs index 79474fab53cb..1069444b2e21 100644 --- a/src/query/src/log_query/planner.rs +++ b/src/query/src/log_query/planner.rs @@ -163,13 +163,30 @@ impl LogQueryPlanner { log_query::ContentFilter::Exist => { Ok(col(&column_filter.column_name).is_not_null()) } - log_query::ContentFilter::Between(lower, upper) => { - Ok(col(&column_filter.column_name) - .gt_eq(lit(ScalarValue::Utf8(Some(escape_like_pattern(lower))))) - .and( - col(&column_filter.column_name) - .lt_eq(lit(ScalarValue::Utf8(Some(escape_like_pattern(upper))))), - )) + log_query::ContentFilter::Between { + start, + end, + start_inclusive, + end_inclusive, + } => { + let left = if *start_inclusive { + Expr::gt_eq + } else { + Expr::gt + }; + let right = if *end_inclusive { + Expr::lt_eq + } else { + Expr::lt + }; + Ok(left( + col(&column_filter.column_name), + lit(ScalarValue::Utf8(Some(escape_like_pattern(start)))), + ) + .and(right( + col(&column_filter.column_name), + lit(ScalarValue::Utf8(Some(escape_like_pattern(end)))), + ))) } log_query::ContentFilter::Compound(..) => Err::<Expr, _>( UnimplementedSnafu { @@ -455,4 +472,31 @@ mod tests { assert_eq!(escape_like_pattern("te_st"), "te\\_st"); assert_eq!(escape_like_pattern("te\\st"), "te\\\\st"); } + + #[tokio::test] + async fn test_build_column_filter_between() { + let table_provider = + build_test_table_provider(&[("public".to_string(), "test_table".to_string())]).await; + let planner = LogQueryPlanner::new(table_provider); + + let column_filter = ColumnFilters { + column_name: "message".to_string(), + filters: vec![ContentFilter::Between { + start: "a".to_string(), + end: "z".to_string(), + start_inclusive: true, + end_inclusive: false, + }], + }; + + let expr_option = planner.build_column_filter(&column_filter).unwrap(); + assert!(expr_option.is_some()); + + let expr = expr_option.unwrap(); + let expected_expr = col("message") + .gt_eq(lit(ScalarValue::Utf8(Some("a".to_string())))) + .and(col("message").lt(lit(ScalarValue::Utf8(Some("z".to_string()))))); + + assert_eq!(format!("{:?}", expr), format!("{:?}", expected_expr)); + } }
feat
support specifying exclusive/inclusive for between filter (#5546)
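Illustrative sketch, not taken from the commit above: the planner change picks gt_eq/gt and lt_eq/lt based on the new start_inclusive/end_inclusive flags and ANDs the two bounds. This std-only sketch shows just that semantics on plain strings; the function name and closure form are hypothetical and stand in for the DataFusion expression building in the diff.

/// Builds a predicate for `start .. end` with per-bound inclusiveness,
/// mirroring how the planner chooses between >=/> and <=/<.
fn between_predicate(
    start: String,
    end: String,
    start_inclusive: bool,
    end_inclusive: bool,
) -> impl Fn(&str) -> bool {
    move |value: &str| {
        let lower_ok = if start_inclusive {
            value >= start.as_str()
        } else {
            value > start.as_str()
        };
        let upper_ok = if end_inclusive {
            value <= end.as_str()
        } else {
            value < end.as_str()
        };
        lower_ok && upper_ok
    }
}

fn main() {
    // Same bounds as the unit test in the diff: start "a" inclusive, end "z" exclusive.
    let pred = between_predicate("a".into(), "z".into(), true, false);
    assert!(pred("a"));
    assert!(pred("message"));
    assert!(!pred("z"));
}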
6b8cf0bbf069e6fee9ead4a23b37ad5f50841c6a
2023-08-28 14:54:12
Ruihang Xia
feat: impl region engine for mito (#2269)
false
diff --git a/Cargo.lock b/Cargo.lock index 89a4729c58a3..1a3c89a18683 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4153,7 +4153,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "greptime-proto" version = "0.1.0" -source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3489b4742150abe0a769faf1bb60fbb95b061fc8#3489b4742150abe0a769faf1bb60fbb95b061fc8" +source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=39b0ea8d086d0ab762046b0f473aa3ef8bd347f9#39b0ea8d086d0ab762046b0f473aa3ef8bd347f9" dependencies = [ "prost", "serde", diff --git a/Cargo.toml b/Cargo.toml index 4c2eaa2e16a7..d81105b40f46 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,7 +77,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git derive_builder = "0.12" futures = "0.3" futures-util = "0.3" -greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3489b4742150abe0a769faf1bb60fbb95b061fc8" } +greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "39b0ea8d086d0ab762046b0f473aa3ef8bd347f9" } itertools = "0.10" lazy_static = "1.4" once_cell = "1.18" diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs index bb049ecb26fd..20c27c82aab0 100644 --- a/src/api/src/helper.rs +++ b/src/api/src/helper.rs @@ -56,6 +56,10 @@ impl ColumnDataTypeWrapper { Ok(Self(datatype)) } + pub fn new(datatype: ColumnDataType) -> Self { + Self(datatype) + } + pub fn datatype(&self) -> ColumnDataType { self.0 } @@ -330,17 +334,17 @@ fn query_request_type(request: &QueryRequest) -> &'static str { } /// Returns the type name of the [RegionRequest]. -pub fn region_request_type(request: &region_request::Request) -> &'static str { +pub fn region_request_type(request: &region_request::Body) -> &'static str { match request { - region_request::Request::Inserts(_) => "region.inserts", - region_request::Request::Deletes(_) => "region.deletes", - region_request::Request::Create(_) => "region.create", - region_request::Request::Drop(_) => "region.drop ", - region_request::Request::Open(_) => "region.open", - region_request::Request::Close(_) => "region.close", - region_request::Request::Alter(_) => "region.alter", - region_request::Request::Flush(_) => "region.flush", - region_request::Request::Compact(_) => "region.compact", + region_request::Body::Inserts(_) => "region.inserts", + region_request::Body::Deletes(_) => "region.deletes", + region_request::Body::Create(_) => "region.create", + region_request::Body::Drop(_) => "region.drop", + region_request::Body::Open(_) => "region.open", + region_request::Body::Close(_) => "region.close", + region_request::Body::Alter(_) => "region.alter", + region_request::Body::Flush(_) => "region.flush", + region_request::Body::Compact(_) => "region.compact", } } diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs index 0b4186a3e998..099edcbcef26 100644 --- a/src/datanode/src/error.rs +++ b/src/datanode/src/error.rs @@ -556,6 +556,16 @@ pub enum Error { location: Location, source: BoxedError, }, + + #[snafu(display( + "Failed to build region requests, location:{}, source: {}", + location, + source + ))] + BuildRegionRequests { + location: Location, + source: store_api::metadata::MetadataError, + }, } pub type Result<T> = std::result::Result<T, Error>; @@ -569,6 +579,7 @@ impl ErrorExt for Error { | ExecuteStatement { source, .. } | ExecuteLogicalPlan { source, .. } => source.status_code(), + BuildRegionRequests { source, .. 
} => source.status_code(), HandleHeartbeatResponse { source, .. } => source.status_code(), DecodeLogicalPlan { source, .. } => source.status_code(), diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs index ae41a00f80ae..0719d9e3363b 100644 --- a/src/datanode/src/region_server.rs +++ b/src/datanode/src/region_server.rs @@ -16,15 +16,18 @@ use std::any::Any; use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; -use api::v1::region::region_request::Request as RequestBody; -use api::v1::region::{QueryRequest, RegionResponse}; +use api::v1::region::{region_request, QueryRequest, RegionResponse}; +use api::v1::{ResponseHeader, Status}; use arrow_flight::{FlightData, Ticket}; use async_trait::async_trait; use bytes::Bytes; +use common_error::ext::BoxedError; +use common_error::status_code::StatusCode; use common_query::logical_plan::Expr; use common_query::physical_plan::DfPhysicalPlanAdapter; use common_query::{DfPhysicalPlan, Output}; use common_recordbatch::SendableRecordBatchStream; +use common_runtime::Runtime; use common_telemetry::info; use dashmap::DashMap; use datafusion::catalog::schema::SchemaProvider; @@ -35,10 +38,10 @@ use datafusion::execution::context::SessionState; use datafusion_common::DataFusionError; use datafusion_expr::{Expr as DfExpr, TableType}; use datatypes::arrow::datatypes::SchemaRef; +use futures_util::future::try_join_all; use prost::Message; use query::QueryEngineRef; -use servers::error as servers_error; -use servers::error::Result as ServerResult; +use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult}; use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream}; use servers::grpc::region_server::RegionServerHandler; use session::context::QueryContext; @@ -52,9 +55,9 @@ use table::table::scan::StreamScanAdapter; use tonic::{Request, Response, Result as TonicResult}; use crate::error::{ - DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu, GetRegionMetadataSnafu, - HandleRegionRequestSnafu, RegionEngineNotFoundSnafu, RegionNotFoundSnafu, Result, - UnsupportedOutputSnafu, + BuildRegionRequestsSnafu, DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu, + GetRegionMetadataSnafu, HandleRegionRequestSnafu, RegionEngineNotFoundSnafu, + RegionNotFoundSnafu, Result, UnsupportedOutputSnafu, }; #[derive(Clone)] @@ -63,9 +66,9 @@ pub struct RegionServer { } impl RegionServer { - pub fn new(query_engine: QueryEngineRef) -> Self { + pub fn new(query_engine: QueryEngineRef, runtime: Arc<Runtime>) -> Self { Self { - inner: Arc::new(RegionServerInner::new(query_engine)), + inner: Arc::new(RegionServerInner::new(query_engine, runtime)), } } @@ -88,8 +91,47 @@ impl RegionServer { #[async_trait] impl RegionServerHandler for RegionServer { - async fn handle(&self, _request: RequestBody) -> ServerResult<RegionResponse> { - todo!() + async fn handle(&self, request: region_request::Body) -> ServerResult<RegionResponse> { + let requests = RegionRequest::try_from_request_body(request) + .context(BuildRegionRequestsSnafu) + .map_err(BoxedError::new) + .context(ExecuteGrpcRequestSnafu)?; + let join_tasks = requests.into_iter().map(|(region_id, req)| { + let self_to_move = self.clone(); + self.inner + .runtime + .spawn(async move { self_to_move.handle_request(region_id, req).await }) + }); + + let results = try_join_all(join_tasks) + .await + .context(servers_error::JoinTaskSnafu)?; + + // merge results by simply sum up affected rows. + // only insert/delete will have multiple results. 
+ let mut affected_rows = 0; + for result in results { + match result + .map_err(BoxedError::new) + .context(servers_error::ExecuteGrpcRequestSnafu)? + { + Output::AffectedRows(rows) => affected_rows += rows, + Output::Stream(_) | Output::RecordBatches(_) => { + // TODO: change the output type to only contains `affected_rows` + unreachable!() + } + } + } + + Ok(RegionResponse { + header: Some(ResponseHeader { + status: Some(Status { + status_code: StatusCode::Success as _, + ..Default::default() + }), + }), + affected_rows: affected_rows as _, + }) } } @@ -114,14 +156,16 @@ struct RegionServerInner { engines: RwLock<HashMap<String, RegionEngineRef>>, region_map: DashMap<RegionId, RegionEngineRef>, query_engine: QueryEngineRef, + runtime: Arc<Runtime>, } impl RegionServerInner { - pub fn new(query_engine: QueryEngineRef) -> Self { + pub fn new(query_engine: QueryEngineRef, runtime: Arc<Runtime>) -> Self { Self { engines: RwLock::new(HashMap::new()), region_map: DashMap::new(), query_engine, + runtime, } } diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs index dbc2ececc919..6a002410957d 100644 --- a/src/datanode/src/server.rs +++ b/src/datanode/src/server.rs @@ -54,7 +54,7 @@ impl Services { .context(RuntimeResourceSnafu)?, ); - let region_server = RegionServer::new(instance.query_engine()); + let region_server = RegionServer::new(instance.query_engine(), grpc_runtime.clone()); let flight_handler = if enable_region_server { Some(Arc::new(region_server.clone()) as _) } else { diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs index 1b9c938b72ce..736c2a41a234 100644 --- a/src/mito2/src/engine.rs +++ b/src/mito2/src/engine.rs @@ -19,10 +19,15 @@ mod tests; use std::sync::Arc; +use async_trait::async_trait; +use common_error::ext::BoxedError; use common_query::Output; +use common_recordbatch::SendableRecordBatchStream; use object_store::ObjectStore; use snafu::{OptionExt, ResultExt}; use store_api::logstore::LogStore; +use store_api::metadata::RegionMetadataRef; +use store_api::region_engine::RegionEngine; use store_api::region_request::RegionRequest; use store_api::storage::{RegionId, ScanRequest}; @@ -106,6 +111,15 @@ impl EngineInner { self.workers.stop().await } + fn get_metadata(&self, region_id: RegionId) -> Result<RegionMetadataRef> { + // Reading a region doesn't need to go through the region worker thread. + let region = self + .workers + .get_region(region_id) + .context(RegionNotFoundSnafu { region_id })?; + Ok(region.metadata()) + } + /// Handles [RequestBody] and return its executed result. async fn handle_request(&self, region_id: RegionId, request: RegionRequest) -> Result<Output> { // We validate and then convert the `request` into an inner `RequestBody` for ease of handling. @@ -134,3 +148,38 @@ impl EngineInner { scan_region.scanner() } } + +#[async_trait] +impl RegionEngine for MitoEngine { + fn name(&self) -> &str { + "MitoEngine" + } + + async fn handle_request( + &self, + region_id: RegionId, + request: RegionRequest, + ) -> std::result::Result<Output, BoxedError> { + self.inner + .handle_request(region_id, request) + .await + .map_err(BoxedError::new) + } + + /// Handle substrait query and return a stream of record batches + async fn handle_query( + &self, + _region_id: RegionId, + _request: ScanRequest, + ) -> std::result::Result<SendableRecordBatchStream, BoxedError> { + todo!() + } + + /// Retrieve region's metadata. 
+ async fn get_metadata( + &self, + region_id: RegionId, + ) -> std::result::Result<RegionMetadataRef, BoxedError> { + self.inner.get_metadata(region_id).map_err(BoxedError::new) + } +} diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs index 1e583c2c2f92..0cc6b9b1db55 100644 --- a/src/servers/src/error.rs +++ b/src/servers/src/error.rs @@ -89,6 +89,12 @@ pub enum Error { source: BoxedError, }, + #[snafu(display("{source}"))] + ExecuteGrpcRequest { + location: Location, + source: BoxedError, + }, + #[snafu(display("Failed to check database validity, source: {}", source))] CheckDatabaseValidity { location: Location, @@ -374,6 +380,7 @@ impl ErrorExt for Error { | ExecuteQuery { source, .. } | ExecutePlan { source, .. } | ExecuteGrpcQuery { source, .. } + | ExecuteGrpcRequest { source, .. } | CheckDatabaseValidity { source, .. } => source.status_code(), NotSupported { .. } diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs index c3beda0f70db..1e71b15b71dd 100644 --- a/src/servers/src/grpc.rs +++ b/src/servers/src/grpc.rs @@ -26,7 +26,7 @@ use api::v1::greptime_database_server::GreptimeDatabase; use api::v1::greptime_database_server::GreptimeDatabaseServer; use api::v1::health_check_server::{HealthCheck, HealthCheckServer}; use api::v1::prometheus_gateway_server::{PrometheusGateway, PrometheusGatewayServer}; -use api::v1::region::region_server_server::RegionServerServer; +use api::v1::region::region_server::RegionServer; use api::v1::{HealthCheckRequest, HealthCheckResponse}; #[cfg(feature = "testing")] use arrow_flight::flight_service_server::FlightService; @@ -224,7 +224,7 @@ impl Server for GrpcServer { ))) } if let Some(region_server_handler) = &self.region_server_handler { - builder = builder.add_service(RegionServerServer::new(region_server_handler.clone())) + builder = builder.add_service(RegionServer::new(region_server_handler.clone())) } let (serve_state_tx, serve_state_rx) = oneshot::channel(); diff --git a/src/servers/src/grpc/region_server.rs b/src/servers/src/grpc/region_server.rs index e3a7c06673eb..1bcccc6c3083 100644 --- a/src/servers/src/grpc/region_server.rs +++ b/src/servers/src/grpc/region_server.rs @@ -16,9 +16,8 @@ use std::sync::Arc; use api::helper::region_request_type; use api::v1::auth_header::AuthScheme; -use api::v1::region::region_request::Request as RequestBody; -use api::v1::region::region_server_server::RegionServer as RegionServerService; -use api::v1::region::{RegionRequest, RegionResponse}; +use api::v1::region::region_server::Region as RegionServer; +use api::v1::region::{region_request, RegionRequest, RegionResponse}; use api::v1::{Basic, RequestHeader}; use async_trait::async_trait; use auth::{Identity, Password, UserInfoRef, UserProviderRef}; @@ -42,7 +41,7 @@ use crate::metrics::{METRIC_AUTH_FAILURE, METRIC_CODE_LABEL}; #[async_trait] pub trait RegionServerHandler: Send + Sync { - async fn handle(&self, request: RequestBody) -> Result<RegionResponse>; + async fn handle(&self, request: region_request::Body) -> Result<RegionResponse>; } pub type RegionServerHandlerRef = Arc<dyn RegionServerHandler>; @@ -68,7 +67,7 @@ impl RegionServerRequestHandler { } async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> { - let query = request.request.context(InvalidQuerySnafu { + let query = request.body.context(InvalidQuerySnafu { reason: "Expecting non-empty GreptimeRequest.", })?; @@ -183,7 +182,7 @@ pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryConte } #[async_trait] -impl RegionServerService 
for RegionServerRequestHandler { +impl RegionServer for RegionServerRequestHandler { async fn handle( &self, request: Request<RegionRequest>, diff --git a/src/store-api/src/metadata.rs b/src/store-api/src/metadata.rs index 17aadef89685..863d3411c3b5 100644 --- a/src/store-api/src/metadata.rs +++ b/src/store-api/src/metadata.rs @@ -20,12 +20,14 @@ use std::any::Any; use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use api::helper::ColumnDataTypeWrapper; +use api::v1::region::ColumnDef; use api::v1::SemanticType; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use datatypes::arrow::datatypes::FieldRef; use datatypes::prelude::DataType; -use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; +use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, Schema, SchemaRef}; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize}; use snafu::{ensure, Location, OptionExt, ResultExt, Snafu}; @@ -45,6 +47,32 @@ pub struct ColumnMetadata { pub column_id: ColumnId, } +impl ColumnMetadata { + /// Construct `Self` from protobuf struct [ColumnDef] + pub fn try_from_column_def(column_def: ColumnDef) -> Result<Self> { + let semantic_type = column_def.semantic_type(); + let column_id = column_def.column_id; + + let default_constrain = if column_def.default_constraint.is_empty() { + None + } else { + Some( + ColumnDefaultConstraint::try_from(column_def.default_constraint.as_slice()) + .context(ConvertDatatypesSnafu)?, + ) + }; + let data_type = ColumnDataTypeWrapper::new(column_def.datatype()).into(); + let column_schema = ColumnSchema::new(column_def.name, data_type, column_def.is_nullable) + .with_default_constraint(default_constrain) + .context(ConvertDatatypesSnafu)?; + Ok(Self { + column_schema, + semantic_type, + column_id, + }) + } +} + #[cfg_attr(doc, aquamarine::aquamarine)] /// General static metadata of a region. /// @@ -460,6 +488,16 @@ pub enum MetadataError { location: Location, source: serde_json::Error, }, + + #[snafu(display( + "Failed to convert with struct from datatypes, location: {}, source: {}", + location, + source + ))] + ConvertDatatypes { + location: Location, + source: datatypes::error::Error, + }, } impl ErrorExt for MetadataError { diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs index 818bbe4f20bb..dfcca0a98d18 100644 --- a/src/store-api/src/region_request.rs +++ b/src/store-api/src/region_request.rs @@ -14,13 +14,15 @@ use std::collections::HashMap; +use api::v1::region::region_request; use api::v1::Rows; -use crate::metadata::ColumnMetadata; -use crate::storage::{AlterRequest, ColumnId, ScanRequest}; +use crate::metadata::{ColumnMetadata, MetadataError}; +use crate::storage::{AlterRequest, ColumnId, RegionId, ScanRequest}; #[derive(Debug)] pub enum RegionRequest { + // TODO: rename to InsertRequest Put(RegionPutRequest), Delete(RegionDeleteRequest), Create(RegionCreateRequest), @@ -32,6 +34,85 @@ pub enum RegionRequest { Compact(RegionCompactRequest), } +impl RegionRequest { + /// Convert [Body](region_request::Body) to a group of [RegionRequest] with region id. + /// Inserts/Deletes request might become multiple requests. Others are one-to-one. 
+ // TODO: implement alter request + #[allow(unreachable_code)] + pub fn try_from_request_body( + body: region_request::Body, + ) -> Result<Vec<(RegionId, Self)>, MetadataError> { + match body { + region_request::Body::Inserts(inserts) => Ok(inserts + .requests + .into_iter() + .filter_map(|r| { + let region_id = r.region_id.into(); + r.rows + .map(|rows| (region_id, Self::Put(RegionPutRequest { rows }))) + }) + .collect()), + region_request::Body::Deletes(deletes) => Ok(deletes + .requests + .into_iter() + .filter_map(|r| { + let region_id = r.region_id.into(); + r.rows + .map(|rows| (region_id, Self::Delete(RegionDeleteRequest { rows }))) + }) + .collect()), + region_request::Body::Create(create) => { + let column_metadatas = create + .column_defs + .into_iter() + .map(ColumnMetadata::try_from_column_def) + .collect::<Result<Vec<_>, _>>()?; + Ok(vec![( + create.region_id.into(), + Self::Create(RegionCreateRequest { + engine: create.engine, + column_metadatas, + primary_key: create.primary_key, + create_if_not_exists: create.create_if_not_exists, + options: create.options, + region_dir: create.region_dir, + }), + )]) + } + region_request::Body::Drop(drop) => Ok(vec![( + drop.region_id.into(), + Self::Drop(RegionDropRequest {}), + )]), + region_request::Body::Open(open) => Ok(vec![( + open.region_id.into(), + Self::Open(RegionOpenRequest { + engine: open.engine, + region_dir: open.region_dir, + options: open.options, + }), + )]), + region_request::Body::Close(close) => Ok(vec![( + close.region_id.into(), + Self::Close(RegionCloseRequest {}), + )]), + region_request::Body::Alter(alter) => Ok(vec![( + alter.region_id.into(), + Self::Alter(RegionAlterRequest { + request: unimplemented!(), + }), + )]), + region_request::Body::Flush(flush) => Ok(vec![( + flush.region_id.into(), + Self::Flush(RegionFlushRequest {}), + )]), + region_request::Body::Compact(compact) => Ok(vec![( + compact.region_id.into(), + Self::Compact(RegionCompactRequest {}), + )]), + } + } +} + /// Request to put data into a region. #[derive(Debug)] pub struct RegionPutRequest {
feat
impl region engine for mito (#2269)
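Illustrative sketch, not taken from the commit above: the new RegionServer handler first converts a request body into per-region requests (inserts/deletes fan out to many regions, other bodies map one-to-one), runs them, and then sums the affected rows into a single response. The sketch below is a heavily simplified, std-only stand-in; Body, RegionRequest and from_body are hypothetical miniatures of the types in the diff.

type RegionId = u64;

#[derive(Debug)]
enum Body {
    // (region id, row count) pairs, standing in for the real insert payloads.
    Inserts(Vec<(RegionId, u64)>),
    Flush(RegionId),
}

#[derive(Debug)]
enum RegionRequest {
    Put { rows: u64 },
    Flush,
}

/// Fan a request body out into one request per target region.
fn from_body(body: Body) -> Vec<(RegionId, RegionRequest)> {
    match body {
        Body::Inserts(requests) => requests
            .into_iter()
            .map(|(region_id, rows)| (region_id, RegionRequest::Put { rows }))
            .collect(),
        Body::Flush(region_id) => vec![(region_id, RegionRequest::Flush)],
    }
}

fn main() {
    let requests = from_body(Body::Inserts(vec![(1, 10), (2, 5)]));
    // As in the handler, affected rows from the per-region results are simply summed.
    let affected: u64 = requests
        .iter()
        .map(|(_, request)| match request {
            RegionRequest::Put { rows } => *rows,
            RegionRequest::Flush => 0,
        })
        .sum();
    assert_eq!(affected, 15);
    println!("{requests:?} -> affected rows: {affected}");
}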
1f315e300f271dfc2fa57a0b5920eb4182849478
2024-06-13 16:45:38
Weny Xu
fix: retry on unknown error (#4138)
false
diff --git a/src/client/src/error.rs b/src/client/src/error.rs index 2e3e78d19b1e..c8641087d430 100644 --- a/src/client/src/error.rs +++ b/src/client/src/error.rs @@ -192,6 +192,9 @@ impl Error { } | Self::RegionServer { code: Code::Unavailable, .. + } | Self::RegionServer { + code: Code::Unknown, + .. } ) }
fix
retry on unknown error (#4138)
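Illustrative sketch, not taken from the commit above: the one-hunk change adds Code::Unknown to the set of gRPC status codes the client treats as retryable. The local Code enum below is a hypothetical stand-in for the real status-code type, and the match covers only the arms visible in the hunk (the real list has more variants).

#[derive(Debug, Clone, Copy, PartialEq)]
enum Code {
    Unavailable,
    Unknown,
    InvalidArgument,
}

/// After the change, Unknown is retryable alongside Unavailable.
fn should_retry(code: Code) -> bool {
    matches!(code, Code::Unavailable | Code::Unknown)
}

fn main() {
    assert!(should_retry(Code::Unavailable));
    assert!(should_retry(Code::Unknown));
    assert!(!should_retry(Code::InvalidArgument));
}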
710e2ed133c32674c01c370b1acd07c2ecb9c0f2
2023-02-24 12:35:18
Yingwen
ci: Use fixed skywalking-eyes revision (#1076)
false
diff --git a/.github/workflows/license.yaml b/.github/workflows/license.yaml index 488757cc8008..a336476644fe 100644 --- a/.github/workflows/license.yaml +++ b/.github/workflows/license.yaml @@ -13,4 +13,4 @@ jobs: steps: - uses: actions/checkout@v2 - name: Check License Header - uses: apache/skywalking-eyes/header@main + uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
ci
Use fixed skywalking-eyes revision (#1076)
32fd850c20e56dc8ccdb3ad4655a5da8d5aebda7
2025-03-14 06:38:29
Ruihang Xia
perf: support in list in simple filter (#5709)
false
diff --git a/src/common/recordbatch/src/filter.rs b/src/common/recordbatch/src/filter.rs index 8c1ebe7d530e..32aae0190e44 100644 --- a/src/common/recordbatch/src/filter.rs +++ b/src/common/recordbatch/src/filter.rs @@ -26,6 +26,7 @@ use datafusion_common::cast::{as_boolean_array, as_null_array}; use datafusion_common::{internal_err, DataFusionError, ScalarValue}; use datatypes::arrow::array::{Array, BooleanArray, RecordBatch}; use datatypes::arrow::compute::filter_record_batch; +use datatypes::compute::or_kleene; use datatypes::vectors::VectorRef; use snafu::ResultExt; @@ -47,6 +48,8 @@ pub struct SimpleFilterEvaluator { literal: Scalar<ArrayRef>, /// The operator. op: Operator, + /// Only used when the operator is `Or`-chain. + literal_list: Vec<Scalar<ArrayRef>>, } impl SimpleFilterEvaluator { @@ -69,6 +72,7 @@ impl SimpleFilterEvaluator { column_name, literal: val.to_scalar().ok()?, op, + literal_list: vec![], }) } @@ -83,6 +87,35 @@ impl SimpleFilterEvaluator { | Operator::LtEq | Operator::Gt | Operator::GtEq => {} + Operator::Or => { + let lhs = Self::try_new(&binary.left)?; + let rhs = Self::try_new(&binary.right)?; + if lhs.column_name != rhs.column_name + || !matches!(lhs.op, Operator::Eq | Operator::Or) + || !matches!(rhs.op, Operator::Eq | Operator::Or) + { + return None; + } + let mut list = vec![]; + let placeholder_literal = lhs.literal.clone(); + // above check guarantees the op is either `Eq` or `Or` + if matches!(lhs.op, Operator::Or) { + list.extend(lhs.literal_list); + } else { + list.push(lhs.literal); + } + if matches!(rhs.op, Operator::Or) { + list.extend(rhs.literal_list); + } else { + list.push(rhs.literal); + } + return Some(Self { + column_name: lhs.column_name, + literal: placeholder_literal, + op: Operator::Or, + literal_list: list, + }); + } _ => return None, } @@ -103,6 +136,7 @@ impl SimpleFilterEvaluator { column_name: lhs.name.clone(), literal, op, + literal_list: vec![], }) } _ => None, @@ -118,19 +152,19 @@ impl SimpleFilterEvaluator { let input = input .to_scalar() .with_context(|_| ToArrowScalarSnafu { v: input.clone() })?; - let result = self.evaluate_datum(&input)?; + let result = self.evaluate_datum(&input, 1)?; Ok(result.value(0)) } pub fn evaluate_array(&self, input: &ArrayRef) -> Result<BooleanBuffer> { - self.evaluate_datum(input) + self.evaluate_datum(input, input.len()) } pub fn evaluate_vector(&self, input: &VectorRef) -> Result<BooleanBuffer> { - self.evaluate_datum(&input.to_arrow_array()) + self.evaluate_datum(&input.to_arrow_array(), input.len()) } - fn evaluate_datum(&self, input: &impl Datum) -> Result<BooleanBuffer> { + fn evaluate_datum(&self, input: &impl Datum, input_len: usize) -> Result<BooleanBuffer> { let result = match self.op { Operator::Eq => cmp::eq(input, &self.literal), Operator::NotEq => cmp::neq(input, &self.literal), @@ -138,6 +172,15 @@ impl SimpleFilterEvaluator { Operator::LtEq => cmp::lt_eq(input, &self.literal), Operator::Gt => cmp::gt(input, &self.literal), Operator::GtEq => cmp::gt_eq(input, &self.literal), + Operator::Or => { + // OR operator stands for OR-chained EQs (or INLIST in other words) + let mut result: BooleanArray = vec![false; input_len].into(); + for literal in &self.literal_list { + let rhs = cmp::eq(input, literal).context(ArrowComputeSnafu)?; + result = or_kleene(&result, &rhs).context(ArrowComputeSnafu)?; + } + Ok(result) + } _ => { return UnsupportedOperationSnafu { reason: format!("{:?}", self.op), @@ -349,4 +392,49 @@ mod test { let expected = datatypes::arrow::array::Int32Array::from(vec![5, 
6]); assert_eq!(first_column_values, &expected); } + + #[test] + fn test_complex_filter_expression() { + // Create an expression tree for: col = 'B' OR col = 'C' OR col = 'D' + let col_eq_b = col("col").eq(lit("B")); + let col_eq_c = col("col").eq(lit("C")); + let col_eq_d = col("col").eq(lit("D")); + + // Build the OR chain + let col_or_expr = col_eq_b.or(col_eq_c).or(col_eq_d); + + // Check that SimpleFilterEvaluator can handle OR chain + let or_evaluator = SimpleFilterEvaluator::try_new(&col_or_expr).unwrap(); + assert_eq!(or_evaluator.column_name, "col"); + assert_eq!(or_evaluator.op, Operator::Or); + assert_eq!(or_evaluator.literal_list.len(), 3); + assert_eq!(format!("{:?}", or_evaluator.literal_list), "[Scalar(StringArray\n[\n \"B\",\n]), Scalar(StringArray\n[\n \"C\",\n]), Scalar(StringArray\n[\n \"D\",\n])]"); + + // Create a schema and batch for testing + let schema = Schema::new(vec![Field::new("col", DataType::Utf8, false)]); + let df_schema = DFSchema::try_from(schema.clone()).unwrap(); + let props = ExecutionProps::new(); + let physical_expr = create_physical_expr(&col_or_expr, &df_schema, &props).unwrap(); + + // Create test data + let col_data = Arc::new(datatypes::arrow::array::StringArray::from(vec![ + "B", "C", "E", "B", "C", "D", "F", + ])); + let batch = RecordBatch::try_new(Arc::new(schema), vec![col_data]).unwrap(); + let expected = datatypes::arrow::array::StringArray::from(vec!["B", "C", "B", "C", "D"]); + + // Filter the batch + let filtered_batch = batch_filter(&batch, &physical_expr).unwrap(); + + // Expected: rows with col in ("B", "C", "D") + // That would be rows 0, 1, 3, 4, 5 + assert_eq!(filtered_batch.num_rows(), 5); + + let col_filtered = filtered_batch + .column(0) + .as_any() + .downcast_ref::<datatypes::arrow::array::StringArray>() + .unwrap(); + assert_eq!(col_filtered, &expected); + } } diff --git a/tests/cases/standalone/optimizer/filter_push_down.result b/tests/cases/standalone/optimizer/filter_push_down.result index 33ce01865bb2..30e4789fdbb5 100644 --- a/tests/cases/standalone/optimizer/filter_push_down.result +++ b/tests/cases/standalone/optimizer/filter_push_down.result @@ -204,3 +204,26 @@ DROP TABLE integers; Affected Rows: 0 +CREATE TABLE characters(c STRING, t TIMESTAMP TIME INDEX); + +Affected Rows: 0 + +INSERT INTO characters VALUES ('a', 1), ('b', 2), ('c', 3), (NULL, 4), ('a', 5), ('b', 6), ('c', 7), (NULL, 8); + +Affected Rows: 8 + +SELECT * FROM characters WHERE c IN ('a', 'c') ORDER BY t; + ++---+-------------------------+ +| c | t | ++---+-------------------------+ +| a | 1970-01-01T00:00:00.001 | +| c | 1970-01-01T00:00:00.003 | +| a | 1970-01-01T00:00:00.005 | +| c | 1970-01-01T00:00:00.007 | ++---+-------------------------+ + +DROP TABLE characters; + +Affected Rows: 0 + diff --git a/tests/cases/standalone/optimizer/filter_push_down.sql b/tests/cases/standalone/optimizer/filter_push_down.sql index 0d47ed3713c5..36688412d140 100644 --- a/tests/cases/standalone/optimizer/filter_push_down.sql +++ b/tests/cases/standalone/optimizer/filter_push_down.sql @@ -57,3 +57,11 @@ SELECT * FROM (SELECT i1.i AS a, i2.i AS b, row_number() OVER (ORDER BY i1.i, i2 SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2 GROUP BY 1) a1 WHERE cond ORDER BY 1; DROP TABLE integers; + +CREATE TABLE characters(c STRING, t TIMESTAMP TIME INDEX); + +INSERT INTO characters VALUES ('a', 1), ('b', 2), ('c', 3), (NULL, 4), ('a', 5), ('b', 6), ('c', 7), (NULL, 8); + +SELECT * FROM characters WHERE c IN ('a', 'c') ORDER BY t; + +DROP TABLE characters;
perf
support in list in simple filter (#5709)
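Illustrative sketch, not taken from the commit above: SimpleFilterEvaluator now treats an OR-chain of equalities on one column as an IN list, starting from an all-false mask and OR-ing in the equality result for each literal. The std-only function below shows that accumulation on plain strings; it is a stand-in for the Arrow cmp::eq / or_kleene calls in the diff.

/// Evaluate `value IN (literals)` for each input row by OR-ing per-literal equality.
fn eval_in_list(input: &[&str], literals: &[&str]) -> Vec<bool> {
    // Start from all-false and OR in one equality pass per literal,
    // mirroring the or_kleene accumulation in evaluate_datum.
    let mut result = vec![false; input.len()];
    for literal in literals {
        for (acc, value) in result.iter_mut().zip(input) {
            *acc = *acc || value == literal;
        }
    }
    result
}

fn main() {
    // Mirrors the SQL test: SELECT * FROM characters WHERE c IN ('a', 'c')
    let rows = ["a", "b", "c", "a", "b", "c"];
    let mask = eval_in_list(&rows, &["a", "c"]);
    assert_eq!(mask, vec![true, false, true, true, false, true]);
}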
f82ddc949145e997143bf9b47475de36ff074213
2024-01-24 13:22:47
Weny Xu
fix: fix MockInstance rebuild issue (#3218)
false
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs index 986bb0f66617..362a8c5e8bd0 100644 --- a/tests-integration/src/cluster.rs +++ b/tests-integration/src/cluster.rs @@ -62,7 +62,8 @@ use crate::test_util::{ pub struct GreptimeDbCluster { pub storage_guards: Vec<StorageGuard>, - pub _dir_guards: Vec<FileDirGuard>, + pub dir_guards: Vec<FileDirGuard>, + pub datanode_options: Vec<DatanodeOptions>, pub datanode_instances: HashMap<DatanodeId, Datanode>, pub kv_backend: KvBackendRef, @@ -70,7 +71,6 @@ pub struct GreptimeDbCluster { pub frontend: Arc<FeInstance>, } -#[derive(Clone)] pub struct GreptimeDbClusterBuilder { cluster_name: String, kv_backend: KvBackendRef, @@ -157,9 +157,13 @@ impl GreptimeDbClusterBuilder { self } - pub async fn build(self) -> GreptimeDbCluster { - let datanodes = self.datanodes.unwrap_or(4); - + pub async fn build_with( + &self, + datanode_options: Vec<DatanodeOptions>, + storage_guards: Vec<StorageGuard>, + dir_guards: Vec<FileDirGuard>, + ) -> GreptimeDbCluster { + let datanodes = datanode_options.len(); let channel_config = ChannelConfig::new().timeout(Duration::from_secs(20)); let datanode_clients = Arc::new(DatanodeClients::new(channel_config)); @@ -182,8 +186,9 @@ impl GreptimeDbClusterBuilder { ) .await; - let (datanode_instances, storage_guards, dir_guards) = - self.build_datanodes(meta_srv.clone(), datanodes).await; + let datanode_instances = self + .build_datanodes_with_options(&meta_srv, &datanode_options) + .await; build_datanode_clients(datanode_clients.clone(), &datanode_instances, datanodes).await; @@ -199,8 +204,9 @@ impl GreptimeDbClusterBuilder { frontend.start().await.unwrap(); GreptimeDbCluster { + datanode_options, storage_guards, - _dir_guards: dir_guards, + dir_guards, datanode_instances, kv_backend: self.kv_backend.clone(), meta_srv: meta_srv.meta_srv, @@ -208,16 +214,19 @@ impl GreptimeDbClusterBuilder { } } - async fn build_datanodes( + pub async fn build(&self) -> GreptimeDbCluster { + let datanodes = self.datanodes.unwrap_or(4); + let (datanode_options, storage_guards, dir_guards) = + self.build_datanode_options_and_guards(datanodes).await; + self.build_with(datanode_options, storage_guards, dir_guards) + .await + } + + async fn build_datanode_options_and_guards( &self, - meta_srv: MockInfo, datanodes: u32, - ) -> ( - HashMap<DatanodeId, Datanode>, - Vec<StorageGuard>, - Vec<FileDirGuard>, - ) { - let mut instances = HashMap::with_capacity(datanodes as usize); + ) -> (Vec<DatanodeOptions>, Vec<StorageGuard>, Vec<FileDirGuard>) { + let mut options = Vec::with_capacity(datanodes as usize); let mut storage_guards = Vec::with_capacity(datanodes as usize); let mut dir_guards = Vec::with_capacity(datanodes as usize); @@ -258,28 +267,41 @@ impl GreptimeDbClusterBuilder { }; opts.node_id = Some(datanode_id); - let datanode = self.create_datanode(opts, meta_srv.clone()).await; - - instances.insert(datanode_id, datanode); + options.push(opts); } ( - instances, + options, storage_guards.into_iter().flatten().collect(), dir_guards, ) } + async fn build_datanodes_with_options( + &self, + meta_srv: &MockInfo, + options: &[DatanodeOptions], + ) -> HashMap<DatanodeId, Datanode> { + let mut instances = HashMap::with_capacity(options.len()); + + for opts in options { + let datanode = self.create_datanode(opts.clone(), meta_srv.clone()).await; + instances.insert(opts.node_id.unwrap(), datanode); + } + + instances + } + async fn wait_datanodes_alive( &self, meta_peer_client: &MetaPeerClientRef, - expected_datanodes: u32, + 
expected_datanodes: usize, ) { for _ in 0..10 { let alive_datanodes = meta_srv::lease::filter_datanodes(1000, meta_peer_client, |_, _| true) .await .unwrap() - .len() as u32; + .len(); if alive_datanodes == expected_datanodes { return; } @@ -355,7 +377,7 @@ impl GreptimeDbClusterBuilder { async fn build_datanode_clients( clients: Arc<DatanodeClients>, instances: &HashMap<DatanodeId, Datanode>, - datanodes: u32, + datanodes: usize, ) { for i in 0..datanodes { let datanode_id = i as u64 + 1; diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs index 2d6d00ef2e87..16b78aae346e 100644 --- a/tests-integration/src/standalone.rs +++ b/tests-integration/src/standalone.rs @@ -22,13 +22,14 @@ use common_meta::cache_invalidator::DummyCacheInvalidator; use common_meta::ddl::table_meta::TableMetadataAllocator; use common_meta::ddl_manager::DdlManager; use common_meta::key::TableMetadataManager; +use common_meta::kv_backend::KvBackendRef; use common_meta::region_keeper::MemoryRegionKeeper; use common_meta::sequence::SequenceBuilder; use common_meta::wal_options_allocator::WalOptionsAllocator; use common_procedure::options::ProcedureConfig; +use common_procedure::ProcedureManagerRef; use common_telemetry::logging::LoggingOptions; use common_wal::config::{DatanodeWalConfig, MetaSrvWalConfig}; -use datanode::config::DatanodeOptions; use datanode::datanode::DatanodeBuilder; use frontend::frontend::FrontendOptions; use frontend::instance::builder::FrontendBuilder; @@ -39,12 +40,13 @@ use crate::test_util::{self, create_tmp_dir_and_datanode_opts, StorageType, Test pub struct GreptimeDbStandalone { pub instance: Arc<Instance>, - pub datanode_opts: DatanodeOptions, pub mix_options: MixOptions, pub guard: TestGuard, + // Used in rebuild. 
+ pub kv_backend: KvBackendRef, + pub procedure_manager: ProcedureManagerRef, } -#[derive(Clone)] pub struct GreptimeDbStandaloneBuilder { instance_name: String, wal_config: DatanodeWalConfig, @@ -104,31 +106,16 @@ impl GreptimeDbStandaloneBuilder { self } - pub async fn build(self) -> GreptimeDbStandalone { - let default_store_type = self.default_store.unwrap_or(StorageType::File); - let store_types = self.store_providers.unwrap_or_default(); - - let (opts, guard) = create_tmp_dir_and_datanode_opts( - Mode::Standalone, - default_store_type, - store_types, - &self.instance_name, - self.wal_config.clone(), - ); - - let procedure_config = ProcedureConfig::default(); - let kv_backend_config = KvBackendConfig::default(); - let (kv_backend, procedure_manager) = Instance::try_build_standalone_components( - format!("{}/kv", &opts.storage.data_home), - kv_backend_config.clone(), - procedure_config.clone(), - ) - .await - .unwrap(); - - let plugins = self.plugin.unwrap_or_default(); + pub async fn build_with( + &self, + kv_backend: KvBackendRef, + procedure_manager: ProcedureManagerRef, + guard: TestGuard, + mix_options: MixOptions, + ) -> GreptimeDbStandalone { + let plugins = self.plugin.clone().unwrap_or_default(); - let datanode = DatanodeBuilder::new(opts.clone(), plugins.clone()) + let datanode = DatanodeBuilder::new(mix_options.datanode.clone(), plugins.clone()) .with_kv_backend(kv_backend.clone()) .build() .await @@ -145,9 +132,8 @@ impl GreptimeDbStandaloneBuilder { .step(10) .build(), ); - let wal_meta = self.meta_wal_config.clone(); let wal_options_allocator = Arc::new(WalOptionsAllocator::new( - wal_meta.clone(), + mix_options.wal_meta.clone(), kv_backend.clone(), )); let table_meta_allocator = TableMetadataAllocator::new( @@ -168,11 +154,12 @@ impl GreptimeDbStandaloneBuilder { .unwrap(), ); - let instance = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor) - .with_plugin(plugins) - .try_build() - .await - .unwrap(); + let instance = + FrontendBuilder::new(kv_backend.clone(), datanode_manager, ddl_task_executor) + .with_plugin(plugins) + .try_build() + .await + .unwrap(); procedure_manager.start().await.unwrap(); wal_options_allocator.start().await.unwrap(); @@ -183,17 +170,47 @@ impl GreptimeDbStandaloneBuilder { GreptimeDbStandalone { instance: Arc::new(instance), - datanode_opts: opts.clone(), - mix_options: MixOptions { - data_home: opts.storage.data_home.to_string(), - procedure: procedure_config, - metadata_store: kv_backend_config, - frontend: FrontendOptions::default(), - datanode: opts, - logging: LoggingOptions::default(), - wal_meta, - }, + mix_options, guard, + kv_backend, + procedure_manager, } } + + pub async fn build(&self) -> GreptimeDbStandalone { + let default_store_type = self.default_store.unwrap_or(StorageType::File); + let store_types = self.store_providers.clone().unwrap_or_default(); + + let (opts, guard) = create_tmp_dir_and_datanode_opts( + Mode::Standalone, + default_store_type, + store_types, + &self.instance_name, + self.wal_config.clone(), + ); + + let kv_backend_config = KvBackendConfig::default(); + let procedure_config = ProcedureConfig::default(); + let (kv_backend, procedure_manager) = Instance::try_build_standalone_components( + format!("{}/kv", &opts.storage.data_home), + kv_backend_config.clone(), + procedure_config.clone(), + ) + .await + .unwrap(); + + let wal_meta = self.meta_wal_config.clone(); + let mix_options = MixOptions { + data_home: opts.storage.data_home.to_string(), + procedure: procedure_config, + metadata_store: 
kv_backend_config, + frontend: FrontendOptions::default(), + datanode: opts, + logging: LoggingOptions::default(), + wal_meta, + }; + + self.build_with(kv_backend, procedure_manager, guard, mix_options) + .await + } } diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs index c3eb104cc65c..0028c559b1a0 100644 --- a/tests-integration/src/test_util.rs +++ b/tests-integration/src/test_util.rs @@ -392,7 +392,7 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router None, ) .with_metrics_handler(MetricsHandler) - .with_greptime_config_options(instance.datanode_opts.to_toml_string()) + .with_greptime_config_options(instance.mix_options.datanode.to_toml_string()) .build(); (http_server.build(http_server.make_app()), instance.guard) } @@ -463,7 +463,7 @@ pub async fn setup_test_prom_app_with_frontend( ) .with_prom_handler(frontend_ref.clone(), true) .with_prometheus_handler(frontend_ref) - .with_greptime_config_options(instance.datanode_opts.to_toml_string()) + .with_greptime_config_options(instance.mix_options.datanode.to_toml_string()) .build(); let app = http_server.build(http_server.make_app()); (app, instance.guard) diff --git a/tests-integration/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs index 0e50bfc0616b..c6f782502b66 100644 --- a/tests-integration/src/tests/test_util.rs +++ b/tests-integration/src/tests/test_util.rs @@ -24,7 +24,7 @@ use common_wal::config::{DatanodeWalConfig, MetaSrvWalConfig}; use frontend::instance::Instance; use rstest_reuse::{self, template}; -use crate::cluster::GreptimeDbClusterBuilder; +use crate::cluster::{GreptimeDbCluster, GreptimeDbClusterBuilder}; use crate::standalone::{GreptimeDbStandalone, GreptimeDbStandaloneBuilder}; use crate::test_util::StorageType; use crate::tests::{create_distributed_instance, MockDistributedInstance}; @@ -32,7 +32,7 @@ use crate::tests::{create_distributed_instance, MockDistributedInstance}; #[async_trait::async_trait] pub(crate) trait RebuildableMockInstance: MockInstance { // Rebuilds the instance and returns rebuilt frontend instance. 
- async fn rebuild(&mut self) -> Arc<Instance>; + async fn rebuild(&mut self); } pub(crate) trait MockInstance: Sync + Send { @@ -68,19 +68,78 @@ pub(crate) enum MockInstanceBuilder { Distributed(GreptimeDbClusterBuilder), } +pub(crate) enum MockInstanceImpl { + Standalone(GreptimeDbStandalone), + Distributed(GreptimeDbCluster), +} + +impl MockInstance for MockInstanceImpl { + fn frontend(&self) -> Arc<Instance> { + match self { + MockInstanceImpl::Standalone(instance) => instance.frontend(), + MockInstanceImpl::Distributed(instance) => instance.frontend.clone(), + } + } + + fn is_distributed_mode(&self) -> bool { + matches!(self, &MockInstanceImpl::Distributed(_)) + } +} + impl MockInstanceBuilder { - async fn build(&self) -> Arc<dyn MockInstance> { + async fn build(&self) -> MockInstanceImpl { match self { - MockInstanceBuilder::Standalone(builder) => Arc::new(builder.clone().build().await), + MockInstanceBuilder::Standalone(builder) => { + MockInstanceImpl::Standalone(builder.build().await) + } + MockInstanceBuilder::Distributed(builder) => { + MockInstanceImpl::Distributed(builder.build().await) + } + } + } + + async fn rebuild(&self, instance: MockInstanceImpl) -> MockInstanceImpl { + match self { + MockInstanceBuilder::Standalone(builder) => { + let MockInstanceImpl::Standalone(instance) = instance else { + unreachable!() + }; + let GreptimeDbStandalone { + mix_options, + guard, + kv_backend, + procedure_manager, + .. + } = instance; + MockInstanceImpl::Standalone( + builder + .build_with(kv_backend, procedure_manager, guard, mix_options) + .await, + ) + } MockInstanceBuilder::Distributed(builder) => { - Arc::new(MockDistributedInstance(builder.clone().build().await)) + let MockInstanceImpl::Distributed(instance) = instance else { + unreachable!() + }; + let GreptimeDbCluster { + storage_guards, + dir_guards, + datanode_options, + .. + } = instance; + + MockInstanceImpl::Distributed( + builder + .build_with(datanode_options, storage_guards, dir_guards) + .await, + ) } } } } pub(crate) struct TestContext { - instance: Arc<dyn MockInstance>, + instance: Option<MockInstanceImpl>, builder: MockInstanceBuilder, } @@ -88,26 +147,28 @@ impl TestContext { async fn new(builder: MockInstanceBuilder) -> Self { let instance = builder.build().await; - Self { instance, builder } + Self { + instance: Some(instance), + builder, + } } } #[async_trait::async_trait] impl RebuildableMockInstance for TestContext { - async fn rebuild(&mut self) -> Arc<Instance> { - let instance = self.builder.build().await; - self.instance = instance; - self.instance.frontend() + async fn rebuild(&mut self) { + let instance = self.builder.rebuild(self.instance.take().unwrap()).await; + self.instance = Some(instance); } } impl MockInstance for TestContext { fn frontend(&self) -> Arc<Instance> { - self.instance.frontend() + self.instance.as_ref().unwrap().frontend() } fn is_distributed_mode(&self) -> bool { - self.instance.is_distributed_mode() + self.instance.as_ref().unwrap().is_distributed_mode() } }
fix
fix MockInstance rebuild issue (#3218)
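Illustrative sketch, not taken from the commit above: the fix makes rebuild() consume the previous instance and rebuild from its retained parts (kv backend, guards, options) via build_with, instead of building a brand-new instance that would lose the old state. Every name below is a hypothetical miniature of the test utilities in the diff, kept only to show that ownership-transfer pattern.

struct RetainedParts {
    kv_path: String,
}

struct MockInstance {
    parts: RetainedParts,
    generation: u32,
}

struct InstanceBuilder;

impl InstanceBuilder {
    fn build_with(&self, parts: RetainedParts, generation: u32) -> MockInstance {
        MockInstance { parts, generation }
    }

    fn rebuild(&self, previous: MockInstance) -> MockInstance {
        // Reuse the previous instance's retained state so the rebuilt frontend
        // sees the same underlying data instead of a fresh, empty setup.
        self.build_with(previous.parts, previous.generation + 1)
    }
}

fn main() {
    let builder = InstanceBuilder;
    let first = builder.build_with(
        RetainedParts {
            kv_path: "/tmp/test/kv".to_string(),
        },
        0,
    );
    let rebuilt = builder.rebuild(first);
    assert_eq!(rebuilt.generation, 1);
    assert_eq!(rebuilt.parts.kv_path, "/tmp/test/kv");
}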
e7b4d2b9cd16dddd81c0878639cf23605d32c62d
2022-11-17 16:10:58
LFC
feat: Implement `table_info()` for `DistTable` (#536) (#557)
false
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs index a3895973941f..24ab530f4ed4 100644 --- a/src/catalog/src/error.rs +++ b/src/catalog/src/error.rs @@ -185,8 +185,8 @@ pub enum Error { source: meta_client::error::Error, }, - #[snafu(display("Invalid table schema in catalog, source: {:?}", source))] - InvalidSchemaInCatalog { + #[snafu(display("Invalid table info in catalog, source: {}", source))] + InvalidTableInfoInCatalog { #[snafu(backtrace)] source: datatypes::error::Error, }, @@ -233,7 +233,7 @@ impl ErrorExt for Error { Error::SystemCatalogTableScan { source } => source.status_code(), Error::SystemCatalogTableScanExec { source } => source.status_code(), Error::InvalidTableSchema { source, .. } => source.status_code(), - Error::InvalidSchemaInCatalog { .. } => StatusCode::Unexpected, + Error::InvalidTableInfoInCatalog { .. } => StatusCode::Unexpected, Error::Internal { source, .. } => source.status_code(), } } diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs index 5fd814ec1c73..5c4ddd680eba 100644 --- a/src/catalog/src/remote/manager.rs +++ b/src/catalog/src/remote/manager.rs @@ -250,10 +250,7 @@ impl RemoteCatalogManager { let table_ref = self.open_or_create_table(&table_key, &table_value).await?; schema.register_table(table_key.table_name.to_string(), table_ref)?; info!("Registered table {}", &table_key.table_name); - if table_value.id > max_table_id { - info!("Max table id: {} -> {}", max_table_id, table_value.id); - max_table_id = table_value.id; - } + max_table_id = max_table_id.max(table_value.table_id()); table_num += 1; } info!( @@ -311,9 +308,10 @@ impl RemoteCatalogManager { .. } = table_key; + let table_id = table_value.table_id(); + let TableGlobalValue { - id, - meta, + table_info, regions_id_map, .. } = table_value; @@ -322,14 +320,17 @@ impl RemoteCatalogManager { catalog_name: catalog_name.clone(), schema_name: schema_name.clone(), table_name: table_name.clone(), - table_id: *id, + table_id, }; match self .engine .open_table(&context, request) .await .with_context(|_| OpenTableSnafu { - table_info: format!("{}.{}.{}, id:{}", catalog_name, schema_name, table_name, id,), + table_info: format!( + "{}.{}.{}, id:{}", + catalog_name, schema_name, table_name, table_id + ), })? 
{ Some(table) => { info!( @@ -344,6 +345,7 @@ impl RemoteCatalogManager { catalog_name, schema_name, table_name ); + let meta = &table_info.meta; let schema = meta .schema .clone() @@ -353,7 +355,7 @@ impl RemoteCatalogManager { schema: meta.schema.clone(), })?; let req = CreateTableRequest { - id: *id, + id: table_id, catalog_name: catalog_name.clone(), schema_name: schema_name.clone(), table_name: table_name.clone(), @@ -371,7 +373,7 @@ impl RemoteCatalogManager { .context(CreateTableSnafu { table_info: format!( "{}.{}.{}, id:{}", - &catalog_name, &schema_name, &table_name, id + &catalog_name, &schema_name, &table_name, table_id ), }) } diff --git a/src/common/catalog/src/helper.rs b/src/common/catalog/src/helper.rs index e36778c94640..ccfe3629691a 100644 --- a/src/common/catalog/src/helper.rs +++ b/src/common/catalog/src/helper.rs @@ -19,7 +19,7 @@ use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize, Serializer}; use snafu::{ensure, OptionExt, ResultExt}; -use table::metadata::{RawTableMeta, TableId, TableVersion}; +use table::metadata::{RawTableInfo, TableId, TableVersion}; use crate::consts::{ CATALOG_KEY_PREFIX, SCHEMA_KEY_PREFIX, TABLE_GLOBAL_KEY_PREFIX, TABLE_REGIONAL_KEY_PREFIX, @@ -128,15 +128,18 @@ impl TableGlobalKey { /// table id, table meta(schema...), region id allocation across datanodes. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct TableGlobalValue { - /// Table id is the same across all datanodes. - pub id: TableId, /// Id of datanode that created the global table info kv. only for debugging. pub node_id: u64, // TODO(LFC): Maybe remove it? /// Allocation of region ids across all datanodes. pub regions_id_map: HashMap<u64, Vec<u32>>, - // TODO(LFC): Too much for assembling the table schema that DistTable needs, find another way. - pub meta: RawTableMeta, + pub table_info: RawTableInfo, +} + +impl TableGlobalValue { + pub fn table_id(&self) -> TableId { + self.table_info.ident.table_id + } } /// Table regional info that varies between datanode, so it contains a `node_id` field. 
@@ -279,6 +282,7 @@ define_catalog_value!( mod tests { use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, RawSchema, Schema}; + use table::metadata::{RawTableMeta, TableIdent, TableType}; use super::*; @@ -339,11 +343,23 @@ mod tests { region_numbers: vec![1], }; + let table_info = RawTableInfo { + ident: TableIdent { + table_id: 42, + version: 1, + }, + name: "table_1".to_string(), + desc: Some("blah".to_string()), + catalog_name: "catalog_1".to_string(), + schema_name: "schema_1".to_string(), + meta, + table_type: TableType::Base, + }; + let value = TableGlobalValue { - id: 42, node_id: 0, regions_id_map: HashMap::from([(0, vec![1, 2, 3])]), - meta, + table_info, }; let serialized = serde_json::to_string(&value).unwrap(); let deserialized = TableGlobalValue::parse(&serialized).unwrap(); diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs index 0cd4e9a30e03..2e5d7b64d48b 100644 --- a/src/frontend/src/catalog.rs +++ b/src/frontend/src/catalog.rs @@ -16,7 +16,7 @@ use std::any::Any; use std::collections::HashSet; use std::sync::Arc; -use catalog::error::{InvalidCatalogValueSnafu, InvalidSchemaInCatalogSnafu}; +use catalog::error::{self as catalog_err, InvalidCatalogValueSnafu}; use catalog::remote::{Kv, KvBackendRef}; use catalog::{ CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, RegisterSchemaRequest, @@ -276,17 +276,16 @@ impl SchemaProvider for FrontendSchemaProvider { let val = TableGlobalValue::parse(String::from_utf8_lossy(&res.1)) .context(InvalidCatalogValueSnafu)?; - let table = Arc::new(DistTable { + let table = Arc::new(DistTable::new( table_name, - schema: Arc::new( - val.meta - .schema + Arc::new( + val.table_info .try_into() - .context(InvalidSchemaInCatalogSnafu)?, + .context(catalog_err::InvalidTableInfoInCatalogSnafu)?, ), table_routes, datanode_clients, - }); + )); Ok(Some(table as _)) }) }) diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs index ced630b6180c..7d5764b7c529 100644 --- a/src/frontend/src/instance/distributed.rs +++ b/src/frontend/src/instance/distributed.rs @@ -37,7 +37,7 @@ use sql::statements::create::Partitions; use sql::statements::sql_value_to_value; use sql::statements::statement::Statement; use sqlparser::ast::Value as SqlValue; -use table::metadata::RawTableMeta; +use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType}; use crate::catalog::FrontendCatalogManager; use crate::datanode::DatanodeClients; @@ -274,11 +274,23 @@ fn create_table_global_value( created_on: DateTime::default(), }; + let table_info = RawTableInfo { + ident: TableIdent { + table_id: table_route.table.id as u32, + version: 0, + }, + name: table_name.table_name.clone(), + desc: create_table.desc.clone(), + catalog_name: table_name.catalog_name.clone(), + schema_name: table_name.schema_name.clone(), + meta, + table_type: TableType::Base, + }; + Ok(TableGlobalValue { - id: table_route.table.id as u32, node_id, regions_id_map: HashMap::new(), - meta, + table_info, }) } diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs index 903f44032a55..8499972cb445 100644 --- a/src/frontend/src/table.rs +++ b/src/frontend/src/table.rs @@ -55,10 +55,10 @@ pub(crate) mod scan; #[derive(Clone)] pub struct DistTable { - pub(crate) table_name: TableName, - pub(crate) schema: SchemaRef, - pub(crate) table_routes: Arc<TableRoutes>, - pub(crate) datanode_clients: Arc<DatanodeClients>, + table_name: TableName, + table_info: TableInfoRef, + table_routes: 
Arc<TableRoutes>, + datanode_clients: Arc<DatanodeClients>, } #[async_trait] @@ -68,11 +68,11 @@ impl Table for DistTable { } fn schema(&self) -> SchemaRef { - self.schema.clone() + self.table_info.meta.schema.clone() } fn table_info(&self) -> TableInfoRef { - unimplemented!() + self.table_info.clone() } async fn insert(&self, request: InsertRequest) -> table::Result<usize> { @@ -133,6 +133,20 @@ impl Table for DistTable { } impl DistTable { + pub(crate) fn new( + table_name: TableName, + table_info: TableInfoRef, + table_routes: Arc<TableRoutes>, + datanode_clients: Arc<DatanodeClients>, + ) -> Self { + Self { + table_name, + table_info, + table_routes, + datanode_clients, + } + } + // TODO(LFC): Finding regions now seems less efficient, should be further looked into. fn find_regions( &self, @@ -477,6 +491,7 @@ mod test { use sql::parser::ParserContext; use sql::statements::statement::Statement; use sqlparser::dialect::GenericDialect; + use table::metadata::{TableInfoBuilder, TableMetaBuilder}; use table::TableRef; use tempdir::TempDir; @@ -496,11 +511,22 @@ mod test { ColumnSchema::new("b", ConcreteDataType::string_datatype(), true), ]; let schema = Arc::new(Schema::new(column_schemas.clone())); + let meta = TableMetaBuilder::default() + .schema(schema) + .primary_key_indices(vec![]) + .next_column_id(1) + .build() + .unwrap(); + let table_info = TableInfoBuilder::default() + .name(&table_name.table_name) + .meta(meta) + .build() + .unwrap(); let table_routes = Arc::new(TableRoutes::new(Arc::new(MetaClient::default()))); let table = DistTable { table_name: table_name.clone(), - schema, + table_info: Arc::new(table_info), table_routes: table_routes.clone(), datanode_clients: Arc::new(DatanodeClients::new()), }; @@ -862,9 +888,20 @@ mod test { insert_testing_data(&table_name, instance.clone(), numbers, start_ts).await; } + let meta = TableMetaBuilder::default() + .schema(schema) + .primary_key_indices(vec![]) + .next_column_id(1) + .build() + .unwrap(); + let table_info = TableInfoBuilder::default() + .name(&table_name.table_name) + .meta(meta) + .build() + .unwrap(); DistTable { table_name, - schema, + table_info: Arc::new(table_info), table_routes, datanode_clients, } @@ -968,9 +1005,21 @@ mod test { ConcreteDataType::int32_datatype(), true, )])); + let table_name = TableName::new("greptime", "public", "foo"); + let meta = TableMetaBuilder::default() + .schema(schema) + .primary_key_indices(vec![]) + .next_column_id(1) + .build() + .unwrap(); + let table_info = TableInfoBuilder::default() + .name(&table_name.table_name) + .meta(meta) + .build() + .unwrap(); let table = DistTable { - table_name: TableName::new("greptime", "public", "foo"), - schema, + table_name, + table_info: Arc::new(table_info), table_routes: Arc::new(TableRoutes::new(Arc::new(MetaClient::default()))), datanode_clients: Arc::new(DatanodeClients::new()), }; diff --git a/src/meta-srv/src/service/router.rs b/src/meta-srv/src/service/router.rs index 02e65f8a0b42..11226fca1a88 100644 --- a/src/meta-srv/src/service/router.rs +++ b/src/meta-srv/src/service/router.rs @@ -184,8 +184,7 @@ async fn fetch_tables( } let tv = tv.unwrap(); - let table_id = tv.id as u64; - let tr_key = TableRouteKey::with_table_global_key(table_id, &tk); + let tr_key = TableRouteKey::with_table_global_key(tv.table_id() as u64, &tk); let tr = get_table_route_value(kv_store, &tr_key).await?; tables.push((tv, tr));
feat
Implement `table_info()` for `DistTable` (#536) (#557)
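The commit above swaps `DistTable`'s stored `SchemaRef` for a full `TableInfoRef`, so `schema()` and `table_info()` both read from a single source of truth. Below is a minimal standalone sketch of that pattern; the `TableInfo`, `TableMeta`, and `Schema` types here are simplified stand-ins for illustration, not GreptimeDB's own definitions.

```rust
// Sketch of the refactoring pattern: store the full table info in the
// table handle and derive the schema accessor from it, instead of keeping
// a separate schema field. Simplified, illustrative types only.
use std::sync::Arc;

struct Schema {
    column_names: Vec<String>,
}

struct TableMeta {
    schema: Arc<Schema>,
}

struct TableInfo {
    name: String,
    meta: TableMeta,
}

struct DistTable {
    table_info: Arc<TableInfo>,
}

impl DistTable {
    fn new(table_info: Arc<TableInfo>) -> Self {
        Self { table_info }
    }

    // `schema()` is derived from the stored table info ...
    fn schema(&self) -> Arc<Schema> {
        self.table_info.meta.schema.clone()
    }

    // ... and `table_info()` simply hands out the stored value.
    fn table_info(&self) -> Arc<TableInfo> {
        self.table_info.clone()
    }
}

fn main() {
    let info = Arc::new(TableInfo {
        name: "numbers".to_string(),
        meta: TableMeta {
            schema: Arc::new(Schema {
                column_names: vec!["a".to_string(), "ts".to_string()],
            }),
        },
    });
    let table = DistTable::new(info);
    println!(
        "{} -> {:?}",
        table.table_info().name,
        table.schema().column_names
    );
}
```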
d1ee1ba56a0562f54de20dc40c5bba111e474171
2023-12-27 14:49:39
WU Jingdi
feat: support set timezone in db (#2992)
false
diff --git a/Cargo.lock b/Cargo.lock index ac08a49f6baa..97e7f48e922e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1533,6 +1533,7 @@ dependencies = [ "common-recordbatch", "common-telemetry", "common-test-util", + "common-time", "common-version", "config", "datanode", @@ -1987,6 +1988,7 @@ dependencies = [ "chrono-tz 0.8.4", "common-error", "common-macro", + "once_cell", "rand", "serde", "serde_json", diff --git a/config/frontend.example.toml b/config/frontend.example.toml index 37a31ba8799d..6bb84e66d3ab 100644 --- a/config/frontend.example.toml +++ b/config/frontend.example.toml @@ -1,5 +1,7 @@ # Node running mode, see `standalone.example.toml`. mode = "distributed" +# The default timezone of the server +# default_timezone = "UTC" [heartbeat] # Interval for sending heartbeat task to the Metasrv, 5 seconds by default. diff --git a/config/standalone.example.toml b/config/standalone.example.toml index 7db8477ec78e..0fa58dd413c6 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -2,6 +2,8 @@ mode = "standalone" # Whether to enable greptimedb telemetry, true by default. enable_telemetry = true +# The default timezone of the server +# default_timezone = "UTC" # HTTP server options. [http] diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 7cf890356f6e..d9d63d74dff4 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -32,6 +32,7 @@ common-recordbatch.workspace = true common-telemetry = { workspace = true, features = [ "deadlock_detection", ] } +common-time.workspace = true config = "0.13" datanode.workspace = true datatypes.workspace = true diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs index d90afaef2442..ec4e41a5dbe2 100644 --- a/src/cmd/src/error.rs +++ b/src/cmd/src/error.rs @@ -43,6 +43,12 @@ pub enum Error { source: common_meta::error::Error, }, + #[snafu(display("Failed to init default timezone"))] + InitTimezone { + location: Location, + source: common_time::error::Error, + }, + #[snafu(display("Failed to start procedure manager"))] StartProcedureManager { location: Location, @@ -268,6 +274,7 @@ impl ErrorExt for Error { | Error::LoadLayeredConfig { .. } | Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } + | Error::InitTimezone { .. } | Error::ConnectEtcd { .. } | Error::NotDataFromOutput { .. } | Error::CreateDir { .. 
} diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs index b1d12e8844bc..607c7f3d7bd1 100644 --- a/src/cmd/src/frontend.rs +++ b/src/cmd/src/frontend.rs @@ -22,6 +22,7 @@ use client::client_manager::DatanodeClients; use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler; use common_meta::heartbeat::handler::HandlerGroupExecutor; use common_telemetry::logging; +use common_time::timezone::set_default_timezone; use frontend::frontend::FrontendOptions; use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler; use frontend::heartbeat::HeartbeatTask; @@ -32,7 +33,7 @@ use servers::tls::{TlsMode, TlsOption}; use servers::Mode; use snafu::{OptionExt, ResultExt}; -use crate::error::{self, MissingConfigSnafu, Result, StartFrontendSnafu}; +use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu}; use crate::options::{CliOptions, Options}; use crate::App; @@ -217,6 +218,8 @@ impl StartCommand { logging::info!("Frontend start command: {:#?}", self); logging::info!("Frontend options: {:#?}", opts); + set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?; + let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu { msg: "'meta_client'", })?; diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs index 812b64d301e1..27cc19fd6655 100644 --- a/src/cmd/src/standalone.rs +++ b/src/cmd/src/standalone.rs @@ -32,6 +32,7 @@ use common_meta::wal::{WalOptionsAllocator, WalOptionsAllocatorRef}; use common_procedure::ProcedureManagerRef; use common_telemetry::info; use common_telemetry::logging::LoggingOptions; +use common_time::timezone::set_default_timezone; use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig}; use datanode::datanode::{Datanode, DatanodeBuilder}; use file_engine::config::EngineConfig as FileEngineConfig; @@ -51,8 +52,8 @@ use servers::Mode; use snafu::ResultExt; use crate::error::{ - CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, Result, - ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, + CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, + Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu, }; use crate::options::{CliOptions, MixOptions, Options}; @@ -98,6 +99,7 @@ impl SubCommand { pub struct StandaloneOptions { pub mode: Mode, pub enable_telemetry: bool, + pub default_timezone: Option<String>, pub http: HttpOptions, pub grpc: GrpcOptions, pub mysql: MysqlOptions, @@ -121,6 +123,7 @@ impl Default for StandaloneOptions { Self { mode: Mode::Standalone, enable_telemetry: true, + default_timezone: None, http: HttpOptions::default(), grpc: GrpcOptions::default(), mysql: MysqlOptions::default(), @@ -147,6 +150,7 @@ impl StandaloneOptions { fn frontend_options(self) -> FrontendOptions { FrontendOptions { mode: self.mode, + default_timezone: self.default_timezone, http: self.http, grpc: self.grpc, mysql: self.mysql, @@ -369,6 +373,9 @@ impl StartCommand { info!("Building standalone instance with {opts:#?}"); + set_default_timezone(opts.frontend.default_timezone.as_deref()) + .context(InitTimezoneSnafu)?; + // Ensure the data_home directory exists. 
fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu { dir: &opts.data_home, diff --git a/src/common/time/Cargo.toml b/src/common/time/Cargo.toml index 5bdba94a7e9b..04976ebadd45 100644 --- a/src/common/time/Cargo.toml +++ b/src/common/time/Cargo.toml @@ -10,6 +10,7 @@ chrono-tz = "0.8" chrono.workspace = true common-error.workspace = true common-macro.workspace = true +once_cell.workspace = true serde = { version = "1.0", features = ["derive"] } serde_json.workspace = true snafu.workspace = true diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs index 3a2274cae7fb..7dc872b8f2ac 100644 --- a/src/common/time/src/datetime.rs +++ b/src/common/time/src/datetime.rs @@ -20,7 +20,7 @@ use chrono::{Days, LocalResult, Months, NaiveDateTime, TimeZone as ChronoTimeZon use serde::{Deserialize, Serialize}; use crate::error::{Error, InvalidDateStrSnafu, Result}; -use crate::timezone::TimeZone; +use crate::timezone::Timezone; use crate::util::{format_utc_datetime, local_datetime_to_utc}; use crate::{Date, Interval}; @@ -110,11 +110,11 @@ impl DateTime { NaiveDateTime::from_timestamp_millis(self.0) } - pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> { + pub fn to_chrono_datetime_with_timezone(&self, tz: Option<Timezone>) -> Option<NaiveDateTime> { let datetime = self.to_chrono_datetime(); datetime.map(|v| match tz { - Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(), - Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(), + Some(Timezone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(), + Some(Timezone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(), None => Utc.from_utc_datetime(&v).naive_local(), }) } @@ -155,10 +155,11 @@ impl DateTime { #[cfg(test)] mod tests { use super::*; + use crate::timezone::set_default_timezone; #[test] pub fn test_new_date_time() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); assert_eq!("1970-01-01 08:00:00+0800", DateTime::new(0).to_string()); assert_eq!("1970-01-01 08:00:01+0800", DateTime::new(1000).to_string()); assert_eq!("1970-01-01 07:59:59+0800", DateTime::new(-1000).to_string()); @@ -166,7 +167,7 @@ mod tests { #[test] pub fn test_parse_from_string() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let time = "1970-01-01 00:00:00+0800"; let dt = DateTime::from_str(time).unwrap(); assert_eq!(time, &dt.to_string()); @@ -194,7 +195,7 @@ mod tests { #[test] fn test_parse_local_date_time() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); assert_eq!( -28800000, DateTime::from_str("1970-01-01 00:00:00").unwrap().val() diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs index 2cd4527d2d07..a1d225610531 100644 --- a/src/common/time/src/error.rs +++ b/src/common/time/src/error.rs @@ -51,8 +51,8 @@ pub enum Error { #[snafu(display("Timestamp arithmetic overflow, msg: {}", msg))] ArithmeticOverflow { msg: String, location: Location }, - #[snafu(display("Invalid time zone offset: {hours}:{minutes}"))] - InvalidTimeZoneOffset { + #[snafu(display("Invalid timezone offset: {hours}:{minutes}"))] + InvalidTimezoneOffset { hours: i32, minutes: u32, location: Location, @@ -66,8 +66,8 @@ pub enum Error { location: Location, }, - #[snafu(display("Invalid time zone string {raw}"))] - ParseTimeZoneName { raw: String, location: Location }, + 
#[snafu(display("Invalid timezone string {raw}"))] + ParseTimezoneName { raw: String, location: Location }, } impl ErrorExt for Error { @@ -75,9 +75,9 @@ impl ErrorExt for Error { match self { Error::ParseDateStr { .. } | Error::ParseTimestamp { .. } - | Error::InvalidTimeZoneOffset { .. } + | Error::InvalidTimezoneOffset { .. } | Error::ParseOffsetStr { .. } - | Error::ParseTimeZoneName { .. } => StatusCode::InvalidArguments, + | Error::ParseTimezoneName { .. } => StatusCode::InvalidArguments, Error::TimestampOverflow { .. } => StatusCode::Internal, Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => { StatusCode::InvalidArguments @@ -96,9 +96,9 @@ impl ErrorExt for Error { | Error::TimestampOverflow { location, .. } | Error::ArithmeticOverflow { location, .. } => Some(*location), Error::ParseDateStr { .. } - | Error::InvalidTimeZoneOffset { .. } + | Error::InvalidTimezoneOffset { .. } | Error::ParseOffsetStr { .. } - | Error::ParseTimeZoneName { .. } => None, + | Error::ParseTimezoneName { .. } => None, Error::InvalidDateStr { location, .. } => Some(*location), Error::ParseInterval { location, .. } => Some(*location), } diff --git a/src/common/time/src/lib.rs b/src/common/time/src/lib.rs index 4a47c212dc11..770057394c2a 100644 --- a/src/common/time/src/lib.rs +++ b/src/common/time/src/lib.rs @@ -31,4 +31,4 @@ pub use interval::Interval; pub use range::RangeMillis; pub use timestamp::Timestamp; pub use timestamp_millis::TimestampMillis; -pub use timezone::TimeZone; +pub use timezone::Timezone; diff --git a/src/common/time/src/time.rs b/src/common/time/src/time.rs index 8deb03a73d3f..fdcc9ee32ec2 100644 --- a/src/common/time/src/time.rs +++ b/src/common/time/src/time.rs @@ -19,8 +19,7 @@ use chrono::{NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc}; use serde::{Deserialize, Serialize}; use crate::timestamp::TimeUnit; -use crate::timezone::TimeZone; -use crate::util::format_utc_datetime; +use crate::timezone::{get_timezone, Timezone}; /// Time value, represents the elapsed time since midnight in the unit of `TimeUnit`. #[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)] @@ -109,30 +108,28 @@ impl Time { self.as_formatted_string("%H:%M:%S%.f%z", None) } - /// Format Time for local timezone. - pub fn to_local_string(&self) -> String { + /// Format Time for system timeszone. + pub fn to_system_tz_string(&self) -> String { self.as_formatted_string("%H:%M:%S%.f", None) } /// Format Time for given timezone. - /// When timezone is None, using local time by default. - pub fn to_timezone_aware_string(&self, tz: Option<TimeZone>) -> String { + /// When timezone is None, using system timezone by default. 
+ pub fn to_timezone_aware_string(&self, tz: Option<Timezone>) -> String { self.as_formatted_string("%H:%M:%S%.f", tz) } - fn as_formatted_string(self, pattern: &str, timezone: Option<TimeZone>) -> String { + fn as_formatted_string(self, pattern: &str, timezone: Option<Timezone>) -> String { if let Some(time) = self.to_chrono_time() { let date = Utc::now().date_naive(); let datetime = NaiveDateTime::new(date, time); - - match timezone { - Some(TimeZone::Offset(offset)) => { + match get_timezone(timezone) { + Timezone::Offset(offset) => { format!("{}", offset.from_utc_datetime(&datetime).format(pattern)) } - Some(TimeZone::Named(tz)) => { + Timezone::Named(tz) => { format!("{}", tz.from_utc_datetime(&datetime).format(pattern)) } - None => format_utc_datetime(&datetime, pattern), } } else { format!("[Time{}: {}]", self.unit, self.value) @@ -223,6 +220,7 @@ mod tests { use serde_json::Value; use super::*; + use crate::timezone::set_default_timezone; #[test] fn test_time() { @@ -312,33 +310,33 @@ mod tests { #[test] fn test_to_iso8601_string() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("+10:00")).unwrap(); let time_millis = 1000001; let ts = Time::new_millisecond(time_millis); - assert_eq!("08:16:40.001+0800", ts.to_iso8601_string()); + assert_eq!("10:16:40.001+1000", ts.to_iso8601_string()); let time_millis = 1000; let ts = Time::new_millisecond(time_millis); - assert_eq!("08:00:01+0800", ts.to_iso8601_string()); + assert_eq!("10:00:01+1000", ts.to_iso8601_string()); let time_millis = 1; let ts = Time::new_millisecond(time_millis); - assert_eq!("08:00:00.001+0800", ts.to_iso8601_string()); + assert_eq!("10:00:00.001+1000", ts.to_iso8601_string()); let time_seconds = 9 * 3600; let ts = Time::new_second(time_seconds); - assert_eq!("17:00:00+0800", ts.to_iso8601_string()); + assert_eq!("19:00:00+1000", ts.to_iso8601_string()); let time_seconds = 23 * 3600; let ts = Time::new_second(time_seconds); - assert_eq!("07:00:00+0800", ts.to_iso8601_string()); + assert_eq!("09:00:00+1000", ts.to_iso8601_string()); } #[test] fn test_serialize_to_json_value() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("+10:00")).unwrap(); assert_eq!( - "08:00:01+0800", + "10:00:01+1000", match serde_json::Value::from(Time::new(1, TimeUnit::Second)) { Value::String(s) => s, _ => unreachable!(), @@ -346,7 +344,7 @@ mod tests { ); assert_eq!( - "08:00:00.001+0800", + "10:00:00.001+1000", match serde_json::Value::from(Time::new(1, TimeUnit::Millisecond)) { Value::String(s) => s, _ => unreachable!(), @@ -354,7 +352,7 @@ mod tests { ); assert_eq!( - "08:00:00.000001+0800", + "10:00:00.000001+1000", match serde_json::Value::from(Time::new(1, TimeUnit::Microsecond)) { Value::String(s) => s, _ => unreachable!(), @@ -362,7 +360,7 @@ mod tests { ); assert_eq!( - "08:00:00.000000001+0800", + "10:00:00.000000001+1000", match serde_json::Value::from(Time::new(1, TimeUnit::Nanosecond)) { Value::String(s) => s, _ => unreachable!(), @@ -372,46 +370,47 @@ mod tests { #[test] fn test_to_timezone_aware_string() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("+10:00")).unwrap(); assert_eq!( - "08:00:00.001", + "10:00:00.001", Time::new(1, TimeUnit::Millisecond).to_timezone_aware_string(None) ); + std::env::set_var("TZ", "Asia/Shanghai"); assert_eq!( "08:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("SYSTEM").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap())) ); 
assert_eq!( "08:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("+08:00").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("+08:00").unwrap())) ); assert_eq!( "07:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("+07:00").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("+07:00").unwrap())) ); assert_eq!( "23:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("-01:00").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("-01:00").unwrap())) ); assert_eq!( "08:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("Asia/Shanghai").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap())) ); assert_eq!( "00:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("UTC").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("UTC").unwrap())) ); assert_eq!( "03:00:00.001", Time::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("Europe/Moscow").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Moscow").unwrap())) ); } } diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs index 0e40082378ce..d09890544bd3 100644 --- a/src/common/time/src/timestamp.rs +++ b/src/common/time/src/timestamp.rs @@ -27,12 +27,12 @@ use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu}; -use crate::timezone::TimeZone; -use crate::util::{div_ceil, format_utc_datetime}; +use crate::timezone::{get_timezone, Timezone}; +use crate::util::div_ceil; use crate::{error, Interval}; /// Timestamp represents the value of units(seconds/milliseconds/microseconds/nanoseconds) elapsed -/// since UNIX epoch. The valid value range of [Timestamp] depends on it's unit (all in UTC time zone): +/// since UNIX epoch. The valid value range of [Timestamp] depends on it's unit (all in UTC timezone): /// - for [TimeUnit::Second]: [-262144-01-01 00:00:00, +262143-12-31 23:59:59] /// - for [TimeUnit::Millisecond]: [-262144-01-01 00:00:00.000, +262143-12-31 23:59:59.999] /// - for [TimeUnit::Microsecond]: [-262144-01-01 00:00:00.000000, +262143-12-31 23:59:59.999999] @@ -293,26 +293,26 @@ impl Timestamp { self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f%z", None) } + /// Format timestamp use **system timezone**. pub fn to_local_string(&self) -> String { self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", None) } /// Format timestamp for given timezone. - /// When timezone is None, using local time by default. - pub fn to_timezone_aware_string(&self, tz: Option<TimeZone>) -> String { + /// If `tz==None`, the server default timezone will used. 
+ pub fn to_timezone_aware_string(&self, tz: Option<Timezone>) -> String { self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", tz) } - fn as_formatted_string(self, pattern: &str, timezone: Option<TimeZone>) -> String { + fn as_formatted_string(self, pattern: &str, timezone: Option<Timezone>) -> String { if let Some(v) = self.to_chrono_datetime() { - match timezone { - Some(TimeZone::Offset(offset)) => { + match get_timezone(timezone) { + Timezone::Offset(offset) => { format!("{}", offset.from_utc_datetime(&v).format(pattern)) } - Some(TimeZone::Named(tz)) => { + Timezone::Named(tz) => { format!("{}", tz.from_utc_datetime(&v).format(pattern)) } - None => format_utc_datetime(&v, pattern), } } else { format!("[Timestamp{}: {}]", self.unit, self.value) @@ -324,11 +324,11 @@ impl Timestamp { NaiveDateTime::from_timestamp_opt(sec, nsec) } - pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> { + pub fn to_chrono_datetime_with_timezone(&self, tz: Option<Timezone>) -> Option<NaiveDateTime> { let datetime = self.to_chrono_datetime(); datetime.map(|v| match tz { - Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(), - Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(), + Some(Timezone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(), + Some(Timezone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(), None => Utc.from_utc_datetime(&v).naive_local(), }) } @@ -560,6 +560,7 @@ mod tests { use serde_json::Value; use super::*; + use crate::timezone::set_default_timezone; #[test] pub fn test_time_unit() { @@ -789,7 +790,7 @@ mod tests { #[test] fn test_to_iso8601_string() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let datetime_str = "2020-09-08 13:42:29.042+0000"; let ts = Timestamp::from_str(datetime_str).unwrap(); assert_eq!("2020-09-08 21:42:29.042+0800", ts.to_iso8601_string()); @@ -813,7 +814,7 @@ mod tests { #[test] fn test_serialize_to_json_value() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); assert_eq!( "1970-01-01 08:00:01+0800", match serde_json::Value::from(Timestamp::new(1, TimeUnit::Second)) { @@ -1054,7 +1055,7 @@ mod tests { // $TZ doesn't take effort. 
#[test] - fn test_parse_in_time_zone() { + fn test_parse_in_timezone() { std::env::set_var("TZ", "Asia/Shanghai"); assert_eq!( Timestamp::new(28800, TimeUnit::Second), @@ -1074,7 +1075,7 @@ mod tests { #[test] fn test_to_local_string() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); assert_eq!( "1970-01-01 08:00:00.000000001", @@ -1107,51 +1108,52 @@ mod tests { #[test] fn test_to_timezone_aware_string() { + set_default_timezone(Some("Asia/Shanghai")).unwrap(); std::env::set_var("TZ", "Asia/Shanghai"); - assert_eq!( "1970-01-01 08:00:00.001", - Timestamp::new(1, TimeUnit::Millisecond).to_timezone_aware_string(None) + Timestamp::new(1, TimeUnit::Millisecond) + .to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap())) ); assert_eq!( "1970-01-01 08:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("SYSTEM").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap())) ); assert_eq!( "1970-01-01 08:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("+08:00").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("+08:00").unwrap())) ); assert_eq!( "1970-01-01 07:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("+07:00").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("+07:00").unwrap())) ); assert_eq!( "1969-12-31 23:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("-01:00").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("-01:00").unwrap())) ); assert_eq!( "1970-01-01 08:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("Asia/Shanghai").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap())) ); assert_eq!( "1970-01-01 00:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("UTC").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("UTC").unwrap())) ); assert_eq!( "1970-01-01 01:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("Europe/Berlin").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Berlin").unwrap())) ); assert_eq!( "1970-01-01 03:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) - .to_timezone_aware_string(TimeZone::from_tz_string("Europe/Moscow").unwrap()) + .to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Moscow").unwrap())) ); } diff --git a/src/common/time/src/timezone.rs b/src/common/time/src/timezone.rs index 4b1878c15f8c..700e0db073d9 100644 --- a/src/common/time/src/timezone.rs +++ b/src/common/time/src/timezone.rs @@ -15,24 +15,52 @@ use std::fmt::Display; use std::str::FromStr; -use chrono::{FixedOffset, Local, Offset}; +use chrono::FixedOffset; use chrono_tz::Tz; +use once_cell::sync::OnceCell; use snafu::{OptionExt, ResultExt}; use crate::error::{ - InvalidTimeZoneOffsetSnafu, ParseOffsetStrSnafu, ParseTimeZoneNameSnafu, Result, + InvalidTimezoneOffsetSnafu, ParseOffsetStrSnafu, ParseTimezoneNameSnafu, Result, }; use crate::util::find_tz_from_env; +/// System timezone in `frontend`/`standalone`, +/// config by option `default_timezone` in toml, +/// default value is `UTC` when `default_timezone` is not set. 
+static DEFAULT_TIMEZONE: OnceCell<Timezone> = OnceCell::new(); + +// Set the System timezone by `tz_str` +pub fn set_default_timezone(tz_str: Option<&str>) -> Result<()> { + let tz = match tz_str { + None | Some("") => Timezone::Named(Tz::UTC), + Some(tz) => Timezone::from_tz_string(tz)?, + }; + DEFAULT_TIMEZONE.get_or_init(|| tz); + Ok(()) +} + +#[inline(always)] +/// If the `tz=Some(timezone)`, return `timezone` directly, +/// or return current system timezone. +pub fn get_timezone(tz: Option<Timezone>) -> Timezone { + tz.unwrap_or_else(|| { + DEFAULT_TIMEZONE + .get() + .cloned() + .unwrap_or(Timezone::Named(Tz::UTC)) + }) +} + #[derive(Debug, Clone, PartialEq, Eq)] -pub enum TimeZone { +pub enum Timezone { Offset(FixedOffset), Named(Tz), } -impl TimeZone { +impl Timezone { /// Compute timezone from given offset hours and minutes - /// Return `None` if given offset exceeds scope + /// Return `Err` if given offset exceeds scope pub fn hours_mins_opt(offset_hours: i32, offset_mins: u32) -> Result<Self> { let offset_secs = if offset_hours > 0 { offset_hours * 3600 + offset_mins as i32 * 60 @@ -42,7 +70,7 @@ impl TimeZone { FixedOffset::east_opt(offset_secs) .map(Self::Offset) - .context(InvalidTimeZoneOffsetSnafu { + .context(InvalidTimezoneOffsetSnafu { hours: offset_hours, minutes: offset_mins, }) @@ -57,10 +85,10 @@ impl TimeZone { /// - `SYSTEM` /// - Offset to UTC: `+08:00` , `-11:30` /// - Named zones: `Asia/Shanghai`, `Europe/Berlin` - pub fn from_tz_string(tz_string: &str) -> Result<Option<Self>> { + pub fn from_tz_string(tz_string: &str) -> Result<Self> { // Use system timezone if tz_string.eq_ignore_ascii_case("SYSTEM") { - Ok(None) + Ok(Timezone::Named(find_tz_from_env().unwrap_or(Tz::UTC))) } else if let Some((hrs, mins)) = tz_string.split_once(':') { let hrs = hrs .parse::<i32>() @@ -68,16 +96,16 @@ impl TimeZone { let mins = mins .parse::<u32>() .context(ParseOffsetStrSnafu { raw: tz_string })?; - Self::hours_mins_opt(hrs, mins).map(Some) + Self::hours_mins_opt(hrs, mins) } else if let Ok(tz) = Tz::from_str(tz_string) { - Ok(Some(Self::Named(tz))) + Ok(Self::Named(tz)) } else { - ParseTimeZoneNameSnafu { raw: tz_string }.fail() + ParseTimezoneNameSnafu { raw: tz_string }.fail() } } } -impl Display for TimeZone { +impl Display for Timezone { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Named(tz) => write!(f, "{}", tz.name()), @@ -87,12 +115,9 @@ impl Display for TimeZone { } #[inline] -pub fn system_time_zone_name() -> String { - if let Some(tz) = find_tz_from_env() { - Local::now().with_timezone(&tz).offset().fix().to_string() - } else { - Local::now().offset().to_string() - } +/// Return current system config timezone, default config is UTC +pub fn system_timezone_name() -> String { + format!("{}", get_timezone(None)) } #[cfg(test)] @@ -101,61 +126,56 @@ mod tests { #[test] fn test_from_tz_string() { - assert_eq!(None, TimeZone::from_tz_string("SYSTEM").unwrap()); + assert_eq!( + Timezone::Named(Tz::UTC), + Timezone::from_tz_string("SYSTEM").unwrap() + ); - let utc_plus_8 = Some(TimeZone::Offset(FixedOffset::east_opt(3600 * 8).unwrap())); - assert_eq!(utc_plus_8, TimeZone::from_tz_string("+8:00").unwrap()); - assert_eq!(utc_plus_8, TimeZone::from_tz_string("+08:00").unwrap()); - assert_eq!(utc_plus_8, TimeZone::from_tz_string("08:00").unwrap()); + let utc_plus_8 = Timezone::Offset(FixedOffset::east_opt(3600 * 8).unwrap()); + assert_eq!(utc_plus_8, Timezone::from_tz_string("+8:00").unwrap()); + assert_eq!(utc_plus_8, 
Timezone::from_tz_string("+08:00").unwrap()); + assert_eq!(utc_plus_8, Timezone::from_tz_string("08:00").unwrap()); - let utc_minus_8 = Some(TimeZone::Offset(FixedOffset::west_opt(3600 * 8).unwrap())); - assert_eq!(utc_minus_8, TimeZone::from_tz_string("-08:00").unwrap()); - assert_eq!(utc_minus_8, TimeZone::from_tz_string("-8:00").unwrap()); + let utc_minus_8 = Timezone::Offset(FixedOffset::west_opt(3600 * 8).unwrap()); + assert_eq!(utc_minus_8, Timezone::from_tz_string("-08:00").unwrap()); + assert_eq!(utc_minus_8, Timezone::from_tz_string("-8:00").unwrap()); - let utc_minus_8_5 = Some(TimeZone::Offset( - FixedOffset::west_opt(3600 * 8 + 60 * 30).unwrap(), - )); - assert_eq!(utc_minus_8_5, TimeZone::from_tz_string("-8:30").unwrap()); + let utc_minus_8_5 = Timezone::Offset(FixedOffset::west_opt(3600 * 8 + 60 * 30).unwrap()); + assert_eq!(utc_minus_8_5, Timezone::from_tz_string("-8:30").unwrap()); - let utc_plus_max = Some(TimeZone::Offset(FixedOffset::east_opt(3600 * 14).unwrap())); - assert_eq!(utc_plus_max, TimeZone::from_tz_string("14:00").unwrap()); + let utc_plus_max = Timezone::Offset(FixedOffset::east_opt(3600 * 14).unwrap()); + assert_eq!(utc_plus_max, Timezone::from_tz_string("14:00").unwrap()); - let utc_minus_max = Some(TimeZone::Offset( - FixedOffset::west_opt(3600 * 13 + 60 * 59).unwrap(), - )); - assert_eq!(utc_minus_max, TimeZone::from_tz_string("-13:59").unwrap()); + let utc_minus_max = Timezone::Offset(FixedOffset::west_opt(3600 * 13 + 60 * 59).unwrap()); + assert_eq!(utc_minus_max, Timezone::from_tz_string("-13:59").unwrap()); assert_eq!( - Some(TimeZone::Named(Tz::Asia__Shanghai)), - TimeZone::from_tz_string("Asia/Shanghai").unwrap() + Timezone::Named(Tz::Asia__Shanghai), + Timezone::from_tz_string("Asia/Shanghai").unwrap() ); assert_eq!( - Some(TimeZone::Named(Tz::UTC)), - TimeZone::from_tz_string("UTC").unwrap() + Timezone::Named(Tz::UTC), + Timezone::from_tz_string("UTC").unwrap() ); - assert!(TimeZone::from_tz_string("WORLD_PEACE").is_err()); - assert!(TimeZone::from_tz_string("A0:01").is_err()); - assert!(TimeZone::from_tz_string("20:0A").is_err()); - assert!(TimeZone::from_tz_string(":::::").is_err()); - assert!(TimeZone::from_tz_string("Asia/London").is_err()); - assert!(TimeZone::from_tz_string("Unknown").is_err()); + assert!(Timezone::from_tz_string("WORLD_PEACE").is_err()); + assert!(Timezone::from_tz_string("A0:01").is_err()); + assert!(Timezone::from_tz_string("20:0A").is_err()); + assert!(Timezone::from_tz_string(":::::").is_err()); + assert!(Timezone::from_tz_string("Asia/London").is_err()); + assert!(Timezone::from_tz_string("Unknown").is_err()); } #[test] fn test_timezone_to_string() { - assert_eq!("UTC", TimeZone::Named(Tz::UTC).to_string()); + assert_eq!("UTC", Timezone::Named(Tz::UTC).to_string()); assert_eq!( "+01:00", - TimeZone::from_tz_string("01:00") - .unwrap() - .unwrap() - .to_string() + Timezone::from_tz_string("01:00").unwrap().to_string() ); assert_eq!( "Asia/Shanghai", - TimeZone::from_tz_string("Asia/Shanghai") - .unwrap() + Timezone::from_tz_string("Asia/Shanghai") .unwrap() .to_string() ); diff --git a/src/common/time/src/util.rs b/src/common/time/src/util.rs index 8d1f870cb445..1a890ec2092f 100644 --- a/src/common/time/src/util.rs +++ b/src/common/time/src/util.rs @@ -14,23 +14,24 @@ use std::str::FromStr; -use chrono::offset::Local; use chrono::{LocalResult, NaiveDateTime, TimeZone}; use chrono_tz::Tz; +use crate::timezone::get_timezone; + pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String { - if let Some(tz) = 
find_tz_from_env() { - format!("{}", tz.from_utc_datetime(utc).format(pattern)) - } else { - format!("{}", Local.from_utc_datetime(utc).format(pattern)) + match get_timezone(None) { + crate::Timezone::Offset(offset) => { + offset.from_utc_datetime(utc).format(pattern).to_string() + } + crate::Timezone::Named(tz) => tz.from_utc_datetime(utc).format(pattern).to_string(), } } pub fn local_datetime_to_utc(local: &NaiveDateTime) -> LocalResult<NaiveDateTime> { - if let Some(tz) = find_tz_from_env() { - tz.from_local_datetime(local).map(|x| x.naive_utc()) - } else { - Local.from_local_datetime(local).map(|x| x.naive_utc()) + match get_timezone(None) { + crate::Timezone::Offset(offset) => offset.from_local_datetime(local).map(|x| x.naive_utc()), + crate::Timezone::Named(tz) => tz.from_local_datetime(local).map(|x| x.naive_utc()), } } diff --git a/src/datatypes/src/time.rs b/src/datatypes/src/time.rs index 0612255817c8..845e1dd25c5a 100644 --- a/src/datatypes/src/time.rs +++ b/src/datatypes/src/time.rs @@ -120,11 +120,13 @@ define_time_with_unit!(Nanosecond, i64); #[cfg(test)] mod tests { + use common_time::timezone::set_default_timezone; + use super::*; #[test] fn test_to_serde_json_value() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let time = TimeSecond::new(123); let val = serde_json::Value::from(time); match val { diff --git a/src/datatypes/src/timestamp.rs b/src/datatypes/src/timestamp.rs index fa07e043665f..f434d2e3766e 100644 --- a/src/datatypes/src/timestamp.rs +++ b/src/datatypes/src/timestamp.rs @@ -122,11 +122,13 @@ define_timestamp_with_unit!(Nanosecond); #[cfg(test)] mod tests { + use common_time::timezone::set_default_timezone; + use super::*; #[test] fn test_to_serde_json_value() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let ts = TimestampSecond::new(123); let val = serde_json::Value::from(ts); match val { diff --git a/src/datatypes/src/types/cast.rs b/src/datatypes/src/types/cast.rs index 299d0d625066..d92f5f9bbfbb 100644 --- a/src/datatypes/src/types/cast.rs +++ b/src/datatypes/src/types/cast.rs @@ -176,6 +176,7 @@ mod tests { use common_base::bytes::StringBytes; use common_time::time::Time; + use common_time::timezone::set_default_timezone; use common_time::{Date, DateTime, Timestamp}; use ordered_float::OrderedFloat; @@ -213,7 +214,7 @@ mod tests { #[test] fn test_cast_with_opt() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); // non-strict mode let cast_option = CastOption { strict: false }; let src_value = Value::Int8(-1); diff --git a/src/datatypes/src/types/date_type.rs b/src/datatypes/src/types/date_type.rs index 89a8889cf8f6..a0df0b5a2151 100644 --- a/src/datatypes/src/types/date_type.rs +++ b/src/datatypes/src/types/date_type.rs @@ -101,6 +101,7 @@ impl LogicalPrimitiveType for DateType { #[cfg(test)] mod tests { use common_base::bytes::StringBytes; + use common_time::timezone::set_default_timezone; use common_time::Timestamp; use super::*; @@ -108,7 +109,7 @@ mod tests { // $TZ doesn't take effort #[test] fn test_date_cast() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); // timestamp -> date let ts = Value::Timestamp(Timestamp::from_str("2000-01-01 08:00:01").unwrap()); let date = ConcreteDataType::date_datatype().try_cast(ts).unwrap(); diff --git a/src/datatypes/src/types/datetime_type.rs b/src/datatypes/src/types/datetime_type.rs index 
abed366264fd..4e23982a2e34 100644 --- a/src/datatypes/src/types/datetime_type.rs +++ b/src/datatypes/src/types/datetime_type.rs @@ -101,6 +101,7 @@ impl LogicalPrimitiveType for DateTimeType { #[cfg(test)] mod tests { + use common_time::timezone::set_default_timezone; use common_time::Timestamp; use super::*; @@ -113,7 +114,7 @@ mod tests { assert_eq!(dt, Value::DateTime(DateTime::from(1000))); // cast from String - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let val = Value::String("1970-01-01 00:00:00+0800".into()); let dt = ConcreteDataType::datetime_datatype().try_cast(val).unwrap(); assert_eq!( diff --git a/src/datatypes/src/types/timestamp_type.rs b/src/datatypes/src/types/timestamp_type.rs index ffd63228b098..bca9d3e8e2e2 100644 --- a/src/datatypes/src/types/timestamp_type.rs +++ b/src/datatypes/src/types/timestamp_type.rs @@ -203,6 +203,7 @@ impl_data_type_for_timestamp!(Microsecond); #[cfg(test)] mod tests { + use common_time::timezone::set_default_timezone; use common_time::{Date, DateTime}; use super::*; @@ -230,7 +231,7 @@ mod tests { // $TZ doesn't take effort #[test] fn test_timestamp_cast() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); // String -> TimestampSecond let s = Value::String("2021-01-01 01:02:03".to_string().into()); let ts = ConcreteDataType::timestamp_second_datatype() diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs index c198fde9a7b1..b53a96a2c434 100644 --- a/src/datatypes/src/value.rs +++ b/src/datatypes/src/value.rs @@ -1190,6 +1190,7 @@ impl<'a> ValueRef<'a> { #[cfg(test)] mod tests { use arrow::datatypes::DataType as ArrowDataType; + use common_time::timezone::set_default_timezone; use num_traits::Float; use super::*; @@ -1875,7 +1876,7 @@ mod tests { #[test] fn test_display() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); assert_eq!(Value::Null.to_string(), "Null"); assert_eq!(Value::UInt8(8).to_string(), "8"); assert_eq!(Value::UInt16(16).to_string(), "16"); diff --git a/src/datatypes/src/vectors/datetime.rs b/src/datatypes/src/vectors/datetime.rs index 4ae654cc3e98..8a29648e6e07 100644 --- a/src/datatypes/src/vectors/datetime.rs +++ b/src/datatypes/src/vectors/datetime.rs @@ -26,6 +26,7 @@ mod tests { use arrow::array::{Array, PrimitiveArray}; use arrow_array::ArrayRef; + use common_time::timezone::set_default_timezone; use common_time::DateTime; use super::*; @@ -37,7 +38,7 @@ mod tests { #[test] fn test_datetime_vector() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let v = DateTimeVector::new(PrimitiveArray::from(vec![1000, 2000, 3000])); assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type()); assert_eq!(3, v.len()); diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs index eddd0e73a1b6..18140e6566f8 100644 --- a/src/frontend/src/frontend.rs +++ b/src/frontend/src/frontend.rs @@ -32,6 +32,7 @@ use crate::service_config::{ pub struct FrontendOptions { pub mode: Mode, pub node_id: Option<String>, + pub default_timezone: Option<String>, pub heartbeat: HeartbeatOptions, pub http: HttpOptions, pub grpc: GrpcOptions, @@ -53,6 +54,7 @@ impl Default for FrontendOptions { Self { mode: Mode::Standalone, node_id: None, + default_timezone: None, heartbeat: HeartbeatOptions::frontend_default(), http: HttpOptions::default(), grpc: GrpcOptions::default(), diff --git 
a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs index ca4948d6657e..d59d97deb0ef 100644 --- a/src/servers/src/mysql/federated.rs +++ b/src/servers/src/mysql/federated.rs @@ -21,8 +21,8 @@ use std::sync::Arc; use common_query::Output; use common_recordbatch::RecordBatches; -use common_time::timezone::system_time_zone_name; -use common_time::TimeZone; +use common_time::timezone::system_timezone_name; +use common_time::Timezone; use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema}; use datatypes::vectors::StringVector; @@ -55,7 +55,7 @@ static SELECT_TIME_DIFF_FUNC_PATTERN: Lazy<Regex> = static SHOW_SQL_MODE_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'sql_mode'(.*))").unwrap()); -// Time zone settings +// Timezone settings static SET_TIME_ZONE_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)^SET TIME_ZONE\s*=\s*'(\S+)'").unwrap()); @@ -200,11 +200,8 @@ fn select_variable(query: &str, query_context: QueryContextRef) -> Option<Output // get value of variables from known sources or fallback to defaults let value = match var_as[0] { - "time_zone" => query_context - .time_zone() - .map(|tz| tz.to_string()) - .unwrap_or_else(|| "".to_owned()), - "system_time_zone" => system_time_zone_name(), + "time_zone" => query_context.timezone().to_string(), + "system_time_zone" => system_timezone_name(), _ => VAR_VALUES .get(var_as[0]) .map(|v| v.to_string()) @@ -271,8 +268,8 @@ fn check_set_variables(query: &str, session: SessionRef) -> Option<Output> { if let Some(captures) = SET_TIME_ZONE_PATTERN.captures(query) { // get the capture let tz = captures.get(1).unwrap(); - if let Ok(timezone) = TimeZone::from_tz_string(tz.as_str()) { - session.set_time_zone(timezone); + if let Ok(timezone) = Timezone::from_tz_string(tz.as_str()) { + session.set_timezone(timezone); return Some(Output::AffectedRows(0)); } } @@ -331,6 +328,7 @@ fn get_version() -> String { #[cfg(test)] mod test { + use common_time::timezone::set_default_timezone; use session::context::{Channel, QueryContext}; use session::Session; @@ -390,16 +388,16 @@ mod test { +-----------------+------------------------+"; test(query, expected); - // set sysstem timezone - std::env::set_var("TZ", "Asia/Shanghai"); + // set system timezone + set_default_timezone(Some("Asia/Shanghai")).unwrap(); // complex variables let query = "/* mysql-connector-java-8.0.17 (Revision: 16a712ddb3f826a1933ab42b0039f7fb9eebc6ec) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@collation_server AS collation_server, @@collation_connection AS collation_connection, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_write_timeout AS net_write_timeout, @@performance_schema AS performance_schema, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@transaction_isolation AS transaction_isolation, @@wait_timeout AS wait_timeout;"; let expected = "\ 
-+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+ -| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; | -+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+ -| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | +08:00 | | REPEATABLE-READ | 31536000 | -+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+"; ++--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+---------------+-----------------------+---------------+ +| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; | ++--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+---------------+-----------------------+---------------+ +| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | Asia/Shanghai | Asia/Shanghai | REPEATABLE-READ | 31536000 | ++--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+---------------+-----------------------+---------------+"; test(query, expected); let query = "show variables"; @@ -437,8 +435,17 @@ mod test { } #[test] - fn test_set_time_zone() { + fn test_set_timezone() { + // test default is UTC when no 
config in greptimedb + { + let session = Arc::new(Session::new(None, Channel::Mysql)); + let query_context = session.new_query_context(); + assert_eq!("UTC", query_context.timezone().to_string()); + } + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let session = Arc::new(Session::new(None, Channel::Mysql)); + let query_context = session.new_query_context(); + assert_eq!("Asia/Shanghai", query_context.timezone().to_string()); let output = check( "set time_zone = 'UTC'", QueryContext::arc(), @@ -451,7 +458,7 @@ mod test { _ => unreachable!(), } let query_context = session.new_query_context(); - assert_eq!("UTC", query_context.time_zone().unwrap().to_string()); + assert_eq!("UTC", query_context.timezone().to_string()); let output = check("select @@time_zone", query_context.clone(), session.clone()); match output.unwrap() { diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs index 6d92fb3804e6..00c6ee08e052 100644 --- a/src/servers/src/mysql/writer.rs +++ b/src/servers/src/mysql/writer.rs @@ -193,10 +193,12 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> { Value::Binary(v) => row_writer.write_col(v.deref())?, Value::Date(v) => row_writer.write_col(v.to_chrono_date())?, // convert datetime and timestamp to timezone of current connection - Value::DateTime(v) => row_writer - .write_col(v.to_chrono_datetime_with_timezone(query_context.time_zone()))?, - Value::Timestamp(v) => row_writer - .write_col(v.to_chrono_datetime_with_timezone(query_context.time_zone()))?, + Value::DateTime(v) => row_writer.write_col( + v.to_chrono_datetime_with_timezone(Some(query_context.timezone())), + )?, + Value::Timestamp(v) => row_writer.write_col( + v.to_chrono_datetime_with_timezone(Some(query_context.timezone())), + )?, Value::Interval(v) => row_writer.write_col(v.to_iso8601_string())?, Value::Duration(v) => row_writer.write_col(v.to_std_duration())?, Value::List(_) => { @@ -208,7 +210,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> { }) } Value::Time(v) => row_writer - .write_col(v.to_timezone_aware_string(query_context.time_zone()))?, + .write_col(v.to_timezone_aware_string(Some(query_context.timezone())))?, Value::Decimal128(v) => row_writer.write_col(v.to_string())?, } } diff --git a/src/session/src/context.rs b/src/session/src/context.rs index bfb8a6036aba..256217d78502 100644 --- a/src/session/src/context.rs +++ b/src/session/src/context.rs @@ -21,7 +21,8 @@ use arc_swap::ArcSwap; use auth::UserInfoRef; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_catalog::{build_db_string, parse_catalog_and_schema_from_db_string}; -use common_time::TimeZone; +use common_time::timezone::get_timezone; +use common_time::Timezone; use derive_builder::Builder; use sql::dialect::{Dialect, GreptimeDbDialect, MySqlDialect, PostgreSqlDialect}; @@ -35,7 +36,7 @@ pub struct QueryContext { current_catalog: String, current_schema: String, current_user: ArcSwap<Option<UserInfoRef>>, - time_zone: Option<TimeZone>, + timezone: Timezone, sql_dialect: Box<dyn Dialect + Send + Sync>, } @@ -57,7 +58,7 @@ impl From<&RegionRequestHeader> for QueryContext { current_catalog: catalog.to_string(), current_schema: schema.to_string(), current_user: Default::default(), - time_zone: Default::default(), + timezone: get_timezone(None), sql_dialect: Box::new(GreptimeDbDialect {}), } } @@ -115,8 +116,8 @@ impl QueryContext { } #[inline] - pub fn time_zone(&self) -> Option<TimeZone> { - self.time_zone.clone() + pub fn timezone(&self) -> Timezone { + 
self.timezone.clone() } #[inline] @@ -142,7 +143,7 @@ impl QueryContextBuilder { current_user: self .current_user .unwrap_or_else(|| ArcSwap::new(Arc::new(None))), - time_zone: self.time_zone.unwrap_or(None), + timezone: self.timezone.unwrap_or(get_timezone(None)), sql_dialect: self .sql_dialect .unwrap_or_else(|| Box::new(GreptimeDbDialect {})), diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs index 2ab4e8c56ee4..49290826a069 100644 --- a/src/session/src/lib.rs +++ b/src/session/src/lib.rs @@ -21,7 +21,8 @@ use arc_swap::ArcSwap; use auth::UserInfoRef; use common_catalog::build_db_string; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; -use common_time::TimeZone; +use common_time::timezone::get_timezone; +use common_time::Timezone; use context::QueryContextBuilder; use crate::context::{Channel, ConnInfo, QueryContextRef}; @@ -33,7 +34,7 @@ pub struct Session { schema: ArcSwap<String>, user_info: ArcSwap<UserInfoRef>, conn_info: ConnInfo, - time_zone: ArcSwap<Option<TimeZone>>, + timezone: ArcSwap<Timezone>, } pub type SessionRef = Arc<Session>; @@ -45,7 +46,7 @@ impl Session { schema: ArcSwap::new(Arc::new(DEFAULT_SCHEMA_NAME.into())), user_info: ArcSwap::new(Arc::new(auth::userinfo_by_name(None))), conn_info: ConnInfo::new(addr, channel), - time_zone: ArcSwap::new(Arc::new(None)), + timezone: ArcSwap::new(Arc::new(get_timezone(None))), } } @@ -58,7 +59,7 @@ impl Session { .current_catalog(self.catalog.load().to_string()) .current_schema(self.schema.load().to_string()) .sql_dialect(self.conn_info.channel.dialect()) - .time_zone((**self.time_zone.load()).clone()) + .timezone((**self.timezone.load()).clone()) .build() } @@ -73,13 +74,13 @@ impl Session { } #[inline] - pub fn time_zone(&self) -> Option<TimeZone> { - self.time_zone.load().as_ref().clone() + pub fn timezone(&self) -> Timezone { + self.timezone.load().as_ref().clone() } #[inline] - pub fn set_time_zone(&self, tz: Option<TimeZone>) { - let _ = self.time_zone.swap(Arc::new(tz)); + pub fn set_timezone(&self, tz: Timezone) { + let _ = self.timezone.swap(Arc::new(tz)); } #[inline] diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs index be57b39856af..ebc1c4d9f4fe 100644 --- a/src/sql/src/statements.rs +++ b/src/sql/src/statements.rs @@ -521,6 +521,7 @@ mod tests { use api::v1::ColumnDataType; use common_time::timestamp::TimeUnit; + use common_time::timezone::set_default_timezone; use datatypes::types::BooleanType; use datatypes::value::OrderedFloat; @@ -696,7 +697,7 @@ mod tests { #[test] pub fn test_parse_datetime_literal() { - std::env::set_var("TZ", "Asia/Shanghai"); + set_default_timezone(Some("Asia/Shanghai")).unwrap(); let value = sql_value_to_value( "datetime_col", &ConcreteDataType::datetime_datatype(), diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs index 2b747a554a75..93566296fab0 100644 --- a/tests-integration/tests/sql.rs +++ b/tests-integration/tests/sql.rs @@ -219,8 +219,8 @@ pub async fn test_mysql_timezone(store_type: StorageType) { .unwrap(); let _ = conn.execute("SET time_zone = 'UTC'").await.unwrap(); - let time_zone = conn.fetch_all("SELECT @@time_zone").await.unwrap(); - assert_eq!(time_zone[0].get::<String, usize>(0), "UTC"); + let timezone = conn.fetch_all("SELECT @@time_zone").await.unwrap(); + assert_eq!(timezone[0].get::<String, usize>(0), "UTC"); // test data let _ = conn
feat
support set timezone in db (#2992)
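The core of the commit above is a process-wide default timezone that is initialized once from configuration and used as a fallback whenever a request carries no explicit timezone. Below is a minimal sketch of that pattern, assuming the `once_cell` and `chrono-tz` crates; the function names mirror `set_default_timezone`/`get_timezone` from the diff, but the code is an illustrative standalone example, not the GreptimeDB implementation.

```rust
// Minimal sketch: a write-once global default timezone with a UTC fallback.
use std::str::FromStr;

use chrono_tz::Tz;
use once_cell::sync::OnceCell;

static DEFAULT_TIMEZONE: OnceCell<Tz> = OnceCell::new();

/// Initialize the default timezone once at startup; None/empty falls back to UTC.
fn set_default_timezone(tz_str: Option<&str>) -> Result<(), String> {
    let tz = match tz_str {
        None | Some("") => Tz::UTC,
        Some(s) => Tz::from_str(s).map_err(|e| e.to_string())?,
    };
    // Only the first call wins; later calls keep the initial value.
    let _ = DEFAULT_TIMEZONE.get_or_init(|| tz);
    Ok(())
}

/// A per-request timezone takes precedence; otherwise fall back to the default.
fn get_timezone(tz: Option<Tz>) -> Tz {
    tz.unwrap_or_else(|| *DEFAULT_TIMEZONE.get().unwrap_or(&Tz::UTC))
}

fn main() {
    set_default_timezone(Some("Asia/Shanghai")).unwrap();
    assert_eq!(get_timezone(None), Tz::Asia__Shanghai);
    assert_eq!(get_timezone(Some(Tz::UTC)), Tz::UTC);
    println!("default timezone: {}", get_timezone(None));
}
```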
3e4a69017d75dda4bacc13983bbfed0070a6a929
2024-09-04 17:08:47
LFC
build: add mysql and postgresql clients to greptimedb image (#4677)
false
diff --git a/docker/ci/ubuntu/Dockerfile b/docker/ci/ubuntu/Dockerfile index 580b73e56ffa..cc3bed6f2523 100644 --- a/docker/ci/ubuntu/Dockerfile +++ b/docker/ci/ubuntu/Dockerfile @@ -11,7 +11,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ python3.10 \ python3.10-dev \ python3-pip \ - curl + curl \ + mysql-client \ + postgresql-client COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
build
add mysql and postgresql clients to greptimedb image (#4677)
8c1959c580fdb3c5ecafdb6bc4fb6395a80ebedf
2024-12-12 09:19:54
Weny Xu
feat: add prefetch support to `InvertedIndexFooterReader` for reduced I/O time (#5146)
false
diff --git a/src/index/src/inverted_index/error.rs b/src/index/src/inverted_index/error.rs index 49816e63c463..7e861beda6d1 100644 --- a/src/index/src/inverted_index/error.rs +++ b/src/index/src/inverted_index/error.rs @@ -68,6 +68,18 @@ pub enum Error { location: Location, }, + #[snafu(display("Blob size too small"))] + BlobSizeTooSmall { + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Invalid footer payload size"))] + InvalidFooterPayloadSize { + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("Unexpected inverted index footer payload size, max: {max_payload_size}, actual: {actual_payload_size}"))] UnexpectedFooterPayloadSize { max_payload_size: u64, @@ -220,7 +232,9 @@ impl ErrorExt for Error { | KeysApplierUnexpectedPredicates { .. } | CommonIo { .. } | UnknownIntermediateCodecMagic { .. } - | FstCompile { .. } => StatusCode::Unexpected, + | FstCompile { .. } + | InvalidFooterPayloadSize { .. } + | BlobSizeTooSmall { .. } => StatusCode::Unexpected, ParseRegex { .. } | ParseDFA { .. } diff --git a/src/index/src/inverted_index/format/reader/blob.rs b/src/index/src/inverted_index/format/reader/blob.rs index ace0e5c48536..de34cd36f849 100644 --- a/src/index/src/inverted_index/format/reader/blob.rs +++ b/src/index/src/inverted_index/format/reader/blob.rs @@ -19,8 +19,9 @@ use common_base::range_read::RangeReader; use greptime_proto::v1::index::InvertedIndexMetas; use snafu::{ensure, ResultExt}; +use super::footer::DEFAULT_PREFETCH_SIZE; use crate::inverted_index::error::{CommonIoSnafu, Result, UnexpectedBlobSizeSnafu}; -use crate::inverted_index::format::reader::footer::InvertedIndeFooterReader; +use crate::inverted_index::format::reader::footer::InvertedIndexFooterReader; use crate::inverted_index::format::reader::InvertedIndexReader; use crate::inverted_index::format::MIN_BLOB_SIZE; @@ -72,7 +73,8 @@ impl<R: RangeReader> InvertedIndexReader for InvertedIndexBlobReader<R> { let blob_size = metadata.content_length; Self::validate_blob_size(blob_size)?; - let mut footer_reader = InvertedIndeFooterReader::new(&mut self.source, blob_size); + let mut footer_reader = InvertedIndexFooterReader::new(&mut self.source, blob_size) + .with_prefetch_size(DEFAULT_PREFETCH_SIZE); footer_reader.metadata().await.map(Arc::new) } } diff --git a/src/index/src/inverted_index/format/reader/footer.rs b/src/index/src/inverted_index/format/reader/footer.rs index 1f35237711ce..c025ecf52ecd 100644 --- a/src/index/src/inverted_index/format/reader/footer.rs +++ b/src/index/src/inverted_index/format/reader/footer.rs @@ -18,53 +18,88 @@ use prost::Message; use snafu::{ensure, ResultExt}; use crate::inverted_index::error::{ - CommonIoSnafu, DecodeProtoSnafu, Result, UnexpectedFooterPayloadSizeSnafu, - UnexpectedOffsetSizeSnafu, UnexpectedZeroSegmentRowCountSnafu, + BlobSizeTooSmallSnafu, CommonIoSnafu, DecodeProtoSnafu, InvalidFooterPayloadSizeSnafu, Result, + UnexpectedFooterPayloadSizeSnafu, UnexpectedOffsetSizeSnafu, + UnexpectedZeroSegmentRowCountSnafu, }; use crate::inverted_index::format::FOOTER_PAYLOAD_SIZE_SIZE; -/// InvertedIndeFooterReader is for reading the footer section of the blob. -pub struct InvertedIndeFooterReader<R> { +pub const DEFAULT_PREFETCH_SIZE: u64 = 1024; // 1KiB + +/// InvertedIndexFooterReader is for reading the footer section of the blob. 
+pub struct InvertedIndexFooterReader<R> { source: R, blob_size: u64, + prefetch_size: Option<u64>, } -impl<R> InvertedIndeFooterReader<R> { +impl<R> InvertedIndexFooterReader<R> { pub fn new(source: R, blob_size: u64) -> Self { - Self { source, blob_size } + Self { + source, + blob_size, + prefetch_size: None, + } + } + + /// Set the prefetch size for the footer reader. + pub fn with_prefetch_size(mut self, prefetch_size: u64) -> Self { + self.prefetch_size = Some(prefetch_size.max(FOOTER_PAYLOAD_SIZE_SIZE)); + self + } + + pub fn prefetch_size(&self) -> u64 { + self.prefetch_size.unwrap_or(FOOTER_PAYLOAD_SIZE_SIZE) } } -impl<R: RangeReader> InvertedIndeFooterReader<R> { +impl<R: RangeReader> InvertedIndexFooterReader<R> { pub async fn metadata(&mut self) -> Result<InvertedIndexMetas> { - let payload_size = self.read_payload_size().await?; - let metas = self.read_payload(payload_size).await?; - Ok(metas) - } + ensure!( + self.blob_size >= FOOTER_PAYLOAD_SIZE_SIZE, + BlobSizeTooSmallSnafu + ); - async fn read_payload_size(&mut self) -> Result<u64> { - let mut size_buf = [0u8; FOOTER_PAYLOAD_SIZE_SIZE as usize]; - let end = self.blob_size; - let start = end - FOOTER_PAYLOAD_SIZE_SIZE; - self.source - .read_into(start..end, &mut &mut size_buf[..]) + let footer_start = self.blob_size.saturating_sub(self.prefetch_size()); + let suffix = self + .source + .read(footer_start..self.blob_size) .await .context(CommonIoSnafu)?; + let suffix_len = suffix.len(); + let length = u32::from_le_bytes(Self::read_tailing_four_bytes(&suffix)?) as u64; + self.validate_payload_size(length)?; + + let footer_size = FOOTER_PAYLOAD_SIZE_SIZE; + + // Did not fetch the entire file metadata in the initial read, need to make a second request. + if length > suffix_len as u64 - footer_size { + let metadata_start = self.blob_size - length - footer_size; + let meta = self + .source + .read(metadata_start..self.blob_size - footer_size) + .await + .context(CommonIoSnafu)?; + self.parse_payload(&meta, length) + } else { + let metadata_start = self.blob_size - length - footer_size - footer_start; + let meta = &suffix[metadata_start as usize..suffix_len - footer_size as usize]; + self.parse_payload(meta, length) + } + } - let payload_size = u32::from_le_bytes(size_buf) as u64; - self.validate_payload_size(payload_size)?; + fn read_tailing_four_bytes(suffix: &[u8]) -> Result<[u8; 4]> { + let suffix_len = suffix.len(); + ensure!(suffix_len >= 4, InvalidFooterPayloadSizeSnafu); + let mut bytes = [0; 4]; + bytes.copy_from_slice(&suffix[suffix_len - 4..suffix_len]); - Ok(payload_size) + Ok(bytes) } - async fn read_payload(&mut self, payload_size: u64) -> Result<InvertedIndexMetas> { - let end = self.blob_size - FOOTER_PAYLOAD_SIZE_SIZE; - let start = end - payload_size; - let bytes = self.source.read(start..end).await.context(CommonIoSnafu)?; - - let metas = InvertedIndexMetas::decode(&*bytes).context(DecodeProtoSnafu)?; + fn parse_payload(&mut self, bytes: &[u8], payload_size: u64) -> Result<InvertedIndexMetas> { + let metas = InvertedIndexMetas::decode(bytes).context(DecodeProtoSnafu)?; self.validate_metas(&metas, payload_size)?; - Ok(metas) } @@ -113,9 +148,12 @@ impl<R: RangeReader> InvertedIndeFooterReader<R> { #[cfg(test)] mod tests { + use std::assert_matches::assert_matches; + use prost::Message; use super::*; + use crate::inverted_index::error::Error; fn create_test_payload(meta: InvertedIndexMeta) -> Vec<u8> { let mut metas = InvertedIndexMetas { @@ -141,14 +179,18 @@ mod tests { let mut payload_buf = 
create_test_payload(meta); let blob_size = payload_buf.len() as u64; - let mut reader = InvertedIndeFooterReader::new(&mut payload_buf, blob_size); - let payload_size = reader.read_payload_size().await.unwrap(); - let metas = reader.read_payload(payload_size).await.unwrap(); + for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] { + let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size); + if prefetch > 0 { + reader = reader.with_prefetch_size(prefetch); + } - assert_eq!(metas.metas.len(), 1); - let index_meta = &metas.metas.get("test").unwrap(); - assert_eq!(index_meta.name, "test"); + let metas = reader.metadata().await.unwrap(); + assert_eq!(metas.metas.len(), 1); + let index_meta = &metas.metas.get("test").unwrap(); + assert_eq!(index_meta.name, "test"); + } } #[tokio::test] @@ -157,14 +199,20 @@ mod tests { name: "test".to_string(), ..Default::default() }; - let mut payload_buf = create_test_payload(meta); payload_buf.push(0xff); // Add an extra byte to corrupt the footer let blob_size = payload_buf.len() as u64; - let mut reader = InvertedIndeFooterReader::new(&mut payload_buf, blob_size); - let payload_size_result = reader.read_payload_size().await; - assert!(payload_size_result.is_err()); + for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] { + let blob_size = payload_buf.len() as u64; + let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size); + if prefetch > 0 { + reader = reader.with_prefetch_size(prefetch); + } + + let result = reader.metadata().await; + assert_matches!(result, Err(Error::UnexpectedFooterPayloadSize { .. })); + } } #[tokio::test] @@ -178,10 +226,15 @@ mod tests { let mut payload_buf = create_test_payload(meta); let blob_size = payload_buf.len() as u64; - let mut reader = InvertedIndeFooterReader::new(&mut payload_buf, blob_size); - let payload_size = reader.read_payload_size().await.unwrap(); - let payload_result = reader.read_payload(payload_size).await; - assert!(payload_result.is_err()); + for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] { + let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size); + if prefetch > 0 { + reader = reader.with_prefetch_size(prefetch); + } + + let result = reader.metadata().await; + assert_matches!(result, Err(Error::UnexpectedOffsetSize { .. })); + } } } diff --git a/src/index/src/lib.rs b/src/index/src/lib.rs index 197fc01818c0..5e2e41166863 100644 --- a/src/index/src/lib.rs +++ b/src/index/src/lib.rs @@ -13,6 +13,7 @@ // limitations under the License. #![feature(iter_partition_in_place)] +#![feature(assert_matches)] pub mod fulltext_index; pub mod inverted_index;
feat
add prefetch support to `InvertedIndexFooterReader` for reduced I/O time (#5146)
8bdef9a348d997c51ebf1c2a1626f02cf6296403
2023-10-10 09:33:10
Lei, HUANG
feat: memtable filter push down (#2539)
false
diff --git a/Cargo.lock b/Cargo.lock index 8456bab511e4..da7ff9461400 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5500,6 +5500,7 @@ dependencies = [ "anymap", "api", "aquamarine", + "arc-swap", "async-channel", "async-compat", "async-stream", diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml index 55795df28bbe..851b0f02a893 100644 --- a/src/mito2/Cargo.toml +++ b/src/mito2/Cargo.toml @@ -12,6 +12,7 @@ test = ["common-test-util"] anymap = "1.0.0-beta.2" api.workspace = true aquamarine.workspace = true +arc-swap = "1.6" async-channel = "1.9" async-compat = "0.2" async-stream.workspace = true diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs index cc1bf0544d01..1417bf352c10 100644 --- a/src/mito2/src/flush.rs +++ b/src/mito2/src/flush.rs @@ -307,7 +307,7 @@ impl RegionFlushTask { } let file_id = FileId::random(); - let iter = mem.iter(None, &[]); + let iter = mem.iter(None, None); let source = Source::Iter(iter); let mut writer = self .access_layer diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs index bb82fb60d85d..0ced4f547282 100644 --- a/src/mito2/src/memtable.rs +++ b/src/mito2/src/memtable.rs @@ -23,11 +23,11 @@ use std::fmt; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::Arc; -use common_query::logical_plan::Expr; use common_time::Timestamp; use metrics::{decrement_gauge, increment_gauge}; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; +use table::predicate::Predicate; use crate::error::Result; use crate::flush::WriteBufferManagerRef; @@ -73,7 +73,11 @@ pub trait Memtable: Send + Sync + fmt::Debug { /// Scans the memtable. /// `projection` selects columns to read, `None` means reading all columns. /// `filters` are the predicates to be pushed down to memtable. - fn iter(&self, projection: Option<&[ColumnId]>, filters: &[Expr]) -> BoxedBatchIterator; + fn iter( + &self, + projection: Option<&[ColumnId]>, + predicate: Option<Predicate>, + ) -> BoxedBatchIterator; /// Returns true if the memtable is empty. 
fn is_empty(&self) -> bool; diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs index b5ab52447234..9ea4e04ec36f 100644 --- a/src/mito2/src/memtable/time_series.rs +++ b/src/mito2/src/memtable/time_series.rs @@ -19,9 +19,11 @@ use std::sync::atomic::{AtomicI64, AtomicU32, Ordering}; use std::sync::{Arc, RwLock}; use api::v1::OpType; -use common_query::logical_plan::Expr; +use arc_swap::ArcSwapOption; +use common_telemetry::debug; use datatypes::arrow; use datatypes::arrow::array::ArrayRef; +use datatypes::arrow::record_batch::RecordBatch; use datatypes::data_type::DataType; use datatypes::prelude::{MutableVector, ScalarVectorBuilder, Vector, VectorRef}; use datatypes::value::ValueRef; @@ -31,8 +33,12 @@ use datatypes::vectors::{ use snafu::{ensure, ResultExt}; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; +use table::predicate::Predicate; -use crate::error::{ComputeArrowSnafu, ConvertVectorSnafu, PrimaryKeyLengthMismatchSnafu, Result}; +use crate::error::{ + ComputeArrowSnafu, ConvertVectorSnafu, NewRecordBatchSnafu, PrimaryKeyLengthMismatchSnafu, + Result, +}; use crate::flush::WriteBufferManagerRef; use crate::memtable::{ AllocTracker, BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId, @@ -76,7 +82,7 @@ impl MemtableBuilder for TimeSeriesMemtableBuilder { pub struct TimeSeriesMemtable { id: MemtableId, region_metadata: RegionMetadataRef, - row_codec: McmpRowCodec, + row_codec: Arc<McmpRowCodec>, series_set: SeriesSet, alloc_tracker: AllocTracker, max_timestamp: AtomicI64, @@ -89,13 +95,13 @@ impl TimeSeriesMemtable { id: MemtableId, write_buffer_manager: Option<WriteBufferManagerRef>, ) -> Self { - let row_codec = McmpRowCodec::new( + let row_codec = Arc::new(McmpRowCodec::new( region_metadata .primary_key_columns() .map(|c| SortField::new(c.column_schema.data_type.clone())) .collect(), - ); - let series_set = SeriesSet::new(region_metadata.clone()); + )); + let series_set = SeriesSet::new(region_metadata.clone(), row_codec.clone()); Self { id, region_metadata, @@ -177,7 +183,7 @@ impl Memtable for TimeSeriesMemtable { actual: kv.num_primary_keys() } ); - let primary_key_encoded = self.row_codec.encode(kv.primary_keys())?; + let primary_key_encoded = PrimaryKey::new(self.row_codec.encode(kv.primary_keys())?); let fields = kv.fields().collect::<Vec<_>>(); allocated += fields.len() * std::mem::size_of::<ValueRef>(); @@ -200,7 +206,11 @@ impl Memtable for TimeSeriesMemtable { Ok(()) } - fn iter(&self, projection: Option<&[ColumnId]>, _filters: &[Expr]) -> BoxedBatchIterator { + fn iter( + &self, + projection: Option<&[ColumnId]>, + predicate: Option<Predicate>, + ) -> BoxedBatchIterator { let projection = if let Some(projection) = projection { projection.iter().copied().collect() } else { @@ -210,7 +220,7 @@ impl Memtable for TimeSeriesMemtable { .collect() }; - Box::new(self.series_set.iter_series(projection)) + Box::new(self.series_set.iter_series(projection, predicate)) } fn is_empty(&self) -> bool { @@ -248,18 +258,76 @@ impl Memtable for TimeSeriesMemtable { } } -type SeriesRwLockMap = RwLock<BTreeMap<Vec<u8>, Arc<RwLock<Series>>>>; +struct PrimaryKey { + bytes: Vec<u8>, + record_batch: ArcSwapOption<RecordBatch>, +} + +impl Clone for PrimaryKey { + fn clone(&self) -> Self { + Self { + bytes: self.bytes.clone(), + record_batch: Default::default(), + } + } +} + +impl PrimaryKey { + fn new(bytes: Vec<u8>) -> Self { + Self { + bytes, + record_batch: ArcSwapOption::empty(), + } + } + + fn 
get_or_update_record_batch_with<F: FnMut() -> Result<RecordBatch>>( + &self, + mut f: F, + ) -> Result<Arc<RecordBatch>> { + if let Some(rb) = self.record_batch.load_full() { + return Ok(rb); + } + + let batch = Arc::new(f()?); + self.record_batch.store(Some(batch.clone())); + Ok(batch) + } +} + +impl Eq for PrimaryKey {} + +impl PartialEq<Self> for PrimaryKey { + fn eq(&self, other: &Self) -> bool { + self.bytes.eq(&other.bytes) + } +} + +impl PartialOrd<Self> for PrimaryKey { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for PrimaryKey { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.bytes.cmp(&other.bytes) + } +} + +type SeriesRwLockMap = RwLock<BTreeMap<PrimaryKey, Arc<RwLock<Series>>>>; struct SeriesSet { region_metadata: RegionMetadataRef, series: Arc<SeriesRwLockMap>, + codec: Arc<McmpRowCodec>, } impl SeriesSet { - fn new(region_metadata: RegionMetadataRef) -> Self { + fn new(region_metadata: RegionMetadataRef, codec: Arc<McmpRowCodec>) -> Self { Self { region_metadata, series: Default::default(), + codec, } } } @@ -267,7 +335,7 @@ impl SeriesSet { impl SeriesSet { /// Returns the series for given primary key, or create a new series if not already exist, /// along with the allocated memory footprint for primary keys. - fn get_or_add_series(&self, primary_key: Vec<u8>) -> (Arc<RwLock<Series>>, usize) { + fn get_or_add_series(&self, primary_key: PrimaryKey) -> (Arc<RwLock<Series>>, usize) { if let Some(series) = self.series.read().unwrap().get(&primary_key) { return (series.clone(), 0); }; @@ -275,7 +343,7 @@ impl SeriesSet { let mut indices = self.series.write().unwrap(); match indices.entry(primary_key) { Entry::Vacant(v) => { - let key_len = v.key().len(); + let key_len = v.key().bytes.len(); v.insert(s.clone()); (s, key_len) } @@ -285,21 +353,55 @@ impl SeriesSet { } /// Iterates all series in [SeriesSet]. - fn iter_series(&self, projection: HashSet<ColumnId>) -> Iter { + fn iter_series(&self, projection: HashSet<ColumnId>, predicate: Option<Predicate>) -> Iter { + let (primary_key_builders, primary_key_schema) = + primary_key_builders(&self.region_metadata, 1); + Iter { metadata: self.region_metadata.clone(), series: self.series.clone(), projection, last_key: None, + predicate, + pk_schema: primary_key_schema, + primary_key_builders, + codec: self.codec.clone(), } } } +/// Creates primary key array builders and arrow's schema for primary keys of given region schema. 
+fn primary_key_builders( + region_metadata: &RegionMetadataRef, + num_pk_rows: usize, +) -> (Vec<Box<dyn MutableVector>>, arrow::datatypes::SchemaRef) { + let (builders, fields): (_, Vec<_>) = region_metadata + .primary_key_columns() + .map(|pk| { + ( + pk.column_schema + .data_type + .create_mutable_vector(num_pk_rows), + arrow::datatypes::Field::new( + pk.column_schema.name.clone(), + pk.column_schema.data_type.as_arrow_type(), + pk.column_schema.is_nullable(), + ), + ) + }) + .unzip(); + (builders, Arc::new(arrow::datatypes::Schema::new(fields))) +} + struct Iter { metadata: RegionMetadataRef, series: Arc<SeriesRwLockMap>, projection: HashSet<ColumnId>, - last_key: Option<Vec<u8>>, + last_key: Option<PrimaryKey>, + predicate: Option<Predicate>, + pk_schema: arrow::datatypes::SchemaRef, + primary_key_builders: Vec<Box<dyn MutableVector>>, + codec: Arc<McmpRowCodec>, } impl Iterator for Iter { @@ -307,21 +409,82 @@ impl Iterator for Iter { fn next(&mut self) -> Option<Self::Item> { let map = self.series.read().unwrap(); - let mut range = match &self.last_key { - None => map.range::<Vec<u8>, _>(..), + let range = match &self.last_key { + None => map.range::<PrimaryKey, _>(..), Some(last_key) => { - map.range::<Vec<u8>, _>((Bound::Excluded(last_key), Bound::Unbounded)) + map.range::<PrimaryKey, _>((Bound::Excluded(last_key), Bound::Unbounded)) } }; - if let Some((primary_key, series)) = range.next() { + // TODO(hl): maybe yield more than one time series to amortize range overhead. + for (primary_key, series) in range { + if let Some(predicate) = &self.predicate { + if !prune_primary_key( + &self.codec, + primary_key, + &mut self.primary_key_builders, + &self.pk_schema, + predicate, + ) { + // read next series + continue; + } + } self.last_key = Some(primary_key.clone()); let values = series.write().unwrap().compact(&self.metadata); - Some(values.and_then(|v| v.to_batch(primary_key, &self.metadata, &self.projection))) - } else { - None + return Some( + values.and_then(|v| v.to_batch(primary_key, &self.metadata, &self.projection)), + ); } + None + } +} + +fn prune_primary_key( + codec: &Arc<McmpRowCodec>, + pk: &PrimaryKey, + builders: &mut Vec<Box<dyn MutableVector>>, + pk_schema: &arrow::datatypes::SchemaRef, + predicate: &Predicate, +) -> bool { + // no primary key, we simply return true. + if pk_schema.fields().is_empty() { + return true; } + + let Ok(pk_record_batch) = pk.get_or_update_record_batch_with(move || { + pk_to_record_batch(codec, &pk.bytes, builders, pk_schema) + }) else { + return true; + }; + + let result = predicate.prune_primary_key(&pk_record_batch); + debug!( + "Prune primary key: {:?}, res: {:?}", + pk_record_batch, result + ); + result.unwrap_or(true) +} + +fn pk_to_record_batch( + codec: &Arc<McmpRowCodec>, + bytes: &[u8], + builders: &mut Vec<Box<dyn MutableVector>>, + pk_schema: &arrow::datatypes::SchemaRef, +) -> Result<RecordBatch> { + let pk_values = codec.decode(bytes).unwrap(); + assert_eq!(builders.len(), pk_values.len()); + + let arrays = builders + .iter_mut() + .zip(pk_values.iter()) + .map(|(builder, pk_value)| { + builder.push_value_ref(pk_value.as_value_ref()); + builder.to_vector().to_arrow_array() + }) + .collect(); + + RecordBatch::try_new(pk_schema.clone(), arrays).context(NewRecordBatchSnafu) } /// A `Series` holds a list of field values of some given primary key. @@ -461,12 +624,12 @@ impl Values { /// keeps only the latest row for the same timestamp. 
pub fn to_batch( &self, - primary_key: &[u8], + primary_key: &PrimaryKey, metadata: &RegionMetadataRef, projection: &HashSet<ColumnId>, ) -> Result<Batch> { let builder = BatchBuilder::with_required_columns( - primary_key.to_vec(), + primary_key.bytes.clone(), self.timestamp.clone(), self.sequence.clone(), self.op_type.clone(), @@ -699,7 +862,11 @@ mod tests { }; let batch = values - .to_batch(b"test", &schema, &[0, 1, 2, 3, 4].into_iter().collect()) + .to_batch( + &PrimaryKey::new(b"test".to_vec()), + &schema, + &[0, 1, 2, 3, 4].into_iter().collect(), + ) .unwrap(); check_value( &batch, @@ -784,7 +951,13 @@ mod tests { #[test] fn test_series_set_concurrency() { let schema = schema_for_test(); - let set = Arc::new(SeriesSet::new(schema.clone())); + let row_codec = Arc::new(McmpRowCodec::new( + schema + .primary_key_columns() + .map(|c| SortField::new(c.column_schema.data_type.clone())) + .collect(), + )); + let set = Arc::new(SeriesSet::new(schema.clone(), row_codec)); let concurrency = 32; let pk_num = concurrency * 2; @@ -795,7 +968,7 @@ mod tests { for j in i * 100..(i + 1) * 100 { let pk = j % pk_num; let primary_key = format!("pk-{}", pk).as_bytes().to_vec(); - let (series, _) = set.get_or_add_series(primary_key); + let (series, _) = set.get_or_add_series(PrimaryKey::new(primary_key)); let mut guard = series.write().unwrap(); guard.push( ts_value_ref(j as i64), @@ -818,7 +991,7 @@ mod tests { for i in 0..pk_num { let pk = format!("pk-{}", i).as_bytes().to_vec(); - let (series, _) = set.get_or_add_series(pk); + let (series, _) = set.get_or_add_series(PrimaryKey::new(pk)); let mut guard = series.write().unwrap(); let values = guard.compact(&schema).unwrap(); timestamps.extend(values.sequence.iter_data().map(|v| v.unwrap() as i64)); @@ -866,7 +1039,7 @@ mod tests { .map(|kv| kv.timestamp().as_timestamp().unwrap().unwrap().value()) .collect::<HashSet<_>>(); - let iter = memtable.iter(None, &[]); + let iter = memtable.iter(None, None); let read = iter .flat_map(|batch| { batch @@ -892,7 +1065,7 @@ mod tests { let memtable = TimeSeriesMemtable::new(schema, 42, None); memtable.write(&kvs).unwrap(); - let iter = memtable.iter(Some(&[3]), &[]); + let iter = memtable.iter(Some(&[3]), None); let mut v0_all = vec![]; diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs index abe95b8db2f6..805e8d8df9e5 100644 --- a/src/mito2/src/read/seq_scan.rs +++ b/src/mito2/src/read/seq_scan.rs @@ -135,8 +135,7 @@ impl SeqScan { // Scans all memtables and SSTs. Builds a merge reader to merge results. let mut builder = MergeReaderBuilder::new(); for mem in &self.memtables { - // TODO(hl): pass filters once memtable supports filter pushdown. 
- let iter = mem.iter(Some(self.mapper.column_ids()), &[]); + let iter = mem.iter(Some(self.mapper.column_ids()), self.predicate.clone()); builder.push_batch_iter(iter); } for file in &self.files { diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs index 4b6c4142e753..22dca01156e7 100644 --- a/src/mito2/src/test_util/memtable_util.rs +++ b/src/mito2/src/test_util/memtable_util.rs @@ -17,9 +17,9 @@ use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; -use common_query::logical_plan::Expr; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; +use table::predicate::Predicate; use crate::error::Result; use crate::memtable::{ @@ -50,7 +50,11 @@ impl Memtable for EmptyMemtable { Ok(()) } - fn iter(&self, _projection: Option<&[ColumnId]>, _filters: &[Expr]) -> BoxedBatchIterator { + fn iter( + &self, + _projection: Option<&[ColumnId]>, + _filters: Option<Predicate>, + ) -> BoxedBatchIterator { Box::new(std::iter::empty()) } diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs index e4d5c24b0c6a..cfb3b066a8d6 100644 --- a/src/table/src/predicate.rs +++ b/src/table/src/predicate.rs @@ -15,17 +15,19 @@ use std::sync::Arc; use common_query::logical_plan::{DfExpr, Expr}; -use common_telemetry::{error, warn}; +use common_telemetry::{debug, error, warn}; use common_time::range::TimestampRange; use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use datafusion::arrow::record_batch::RecordBatch; use datafusion::parquet::file::metadata::RowGroupMetaData; use datafusion::physical_optimizer::pruning::{PruningPredicate, PruningStatistics}; -use datafusion_common::ToDFSchema; +use datafusion_common::{ScalarValue, ToDFSchema}; use datafusion_expr::expr::InList; -use datafusion_expr::{Between, BinaryExpr, Operator}; +use datafusion_expr::{Between, BinaryExpr, ColumnarValue, Operator}; use datafusion_physical_expr::execution_props::ExecutionProps; use datafusion_physical_expr::{create_physical_expr, PhysicalExpr}; +use datatypes::arrow::array::BooleanArray; use datatypes::schema::SchemaRef; use datatypes::value::scalar_value_to_timestamp; use snafu::ResultExt; @@ -119,6 +121,39 @@ impl Predicate { res } + /// Prunes primary keys + pub fn prune_primary_key(&self, primary_key: &RecordBatch) -> error::Result<bool> { + for expr in &self.exprs { + // evaluate every filter against primary key + let Ok(eva) = expr.evaluate(primary_key) else { + continue; + }; + let result = match eva { + ColumnarValue::Array(array) => { + let predicate_array = array.as_any().downcast_ref::<BooleanArray>().unwrap(); + predicate_array + .into_iter() + .map(|x| x.unwrap_or(true)) + .next() + .unwrap_or(true) + } + // result was a column + ColumnarValue::Scalar(ScalarValue::Boolean(v)) => v.unwrap_or(true), + _ => { + unreachable!("Unexpected primary key record batch evaluation result: {:?}, primary key: {:?}", eva, primary_key); + } + }; + debug!( + "Evaluate primary key {:?} against filter: {:?}, result: {:?}", + primary_key, expr, result + ); + if !result { + return Ok(false); + } + } + Ok(true) + } + /// Evaluates the predicate against the `stats`. /// Returns a vector of boolean values, among which `false` means the row group can be skipped. pub fn prune_with_stats<S: PruningStatistics>(&self, stats: &S) -> Vec<bool> {
feat
memtable filter push down (#2539)
d931389a4c385a250c83646ac3d7421b0c9cb7b4
2024-11-29 08:36:27
discord9
fix(flow): minor fix about count(*)&sink keyword (#5061)
false
diff --git a/src/flow/src/df_optimizer.rs b/src/flow/src/df_optimizer.rs index bb296cba7079..a6f609274978 100644 --- a/src/flow/src/df_optimizer.rs +++ b/src/flow/src/df_optimizer.rs @@ -23,6 +23,7 @@ use common_error::ext::BoxedError; use common_telemetry::debug; use datafusion::config::ConfigOptions; use datafusion::error::DataFusionError; +use datafusion::optimizer::analyzer::count_wildcard_rule::CountWildcardRule; use datafusion::optimizer::analyzer::type_coercion::TypeCoercion; use datafusion::optimizer::common_subexpr_eliminate::CommonSubexprEliminate; use datafusion::optimizer::optimize_projections::OptimizeProjections; @@ -59,6 +60,7 @@ pub async fn apply_df_optimizer( ) -> Result<datafusion_expr::LogicalPlan, Error> { let cfg = ConfigOptions::new(); let analyzer = Analyzer::with_rules(vec![ + Arc::new(CountWildcardRule::new()), Arc::new(AvgExpandRule::new()), Arc::new(TumbleExpandRule::new()), Arc::new(CheckGroupByRule::new()), diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs index 296110f4039f..bb9aadadb703 100644 --- a/src/sql/src/parsers/create_parser.rs +++ b/src/sql/src/parsers/create_parser.rs @@ -259,9 +259,17 @@ impl<'a> ParserContext<'a> { let flow_name = self.intern_parse_table_name()?; - self.parser - .expect_token(&Token::make_keyword(SINK)) - .context(SyntaxSnafu)?; + // make `SINK` case in-sensitive + if let Token::Word(word) = self.parser.peek_token().token + && word.value.eq_ignore_ascii_case(SINK) + { + self.parser.next_token(); + } else { + Err(ParserError::ParserError( + "Expect `SINK` keyword".to_string(), + )) + .context(SyntaxSnafu)? + } self.parser .expect_keyword(Keyword::TO) .context(SyntaxSnafu)?; diff --git a/tests/cases/standalone/common/flow/flow_basic.result b/tests/cases/standalone/common/flow/flow_basic.result index 4c6095d2529c..cc9b4e038b0f 100644 --- a/tests/cases/standalone/common/flow/flow_basic.result +++ b/tests/cases/standalone/common/flow/flow_basic.result @@ -112,6 +112,73 @@ DROP TABLE out_num_cnt_basic; Affected Rows: 0 +-- test count(*) rewrite +CREATE TABLE input_basic ( + number INT, + ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY(number), + TIME INDEX(ts) +); + +Affected Rows: 0 + +CREATE FLOW test_wildcard_basic SiNk TO out_basic AS +SELECT + COUNT(*) as wildcard +FROM + input_basic; + +Affected Rows: 0 + +DROP FLOW test_wildcard_basic; + +Affected Rows: 0 + +CREATE FLOW test_wildcard_basic sink TO out_basic AS +SELECT + COUNT(*) as wildcard +FROM + input_basic; + +Affected Rows: 0 + +INSERT INTO + input_basic +VALUES + (23, "2021-07-01 00:00:01.000"), + (24, "2021-07-01 00:00:01.500"); + +Affected Rows: 2 + +-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED | +ADMIN FLUSH_FLOW('test_wildcard_basic'); + ++-----------------------------------------+ +| ADMIN FLUSH_FLOW('test_wildcard_basic') | ++-----------------------------------------+ +| FLOW_FLUSHED | ++-----------------------------------------+ + +SELECT wildcard FROM out_basic; + ++----------+ +| wildcard | ++----------+ +| 2 | ++----------+ + +DROP FLOW test_wildcard_basic; + +Affected Rows: 0 + +DROP TABLE out_basic; + +Affected Rows: 0 + +DROP TABLE input_basic; + +Affected Rows: 0 + -- test distinct CREATE TABLE distinct_basic ( number INT, diff --git a/tests/cases/standalone/common/flow/flow_basic.sql b/tests/cases/standalone/common/flow/flow_basic.sql index 3a1a53d0edfb..70d7b14157c2 100644 --- a/tests/cases/standalone/common/flow/flow_basic.sql +++ 
b/tests/cases/standalone/common/flow/flow_basic.sql @@ -61,6 +61,43 @@ DROP TABLE numbers_input_basic; DROP TABLE out_num_cnt_basic; +-- test count(*) rewrite +CREATE TABLE input_basic ( + number INT, + ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY(number), + TIME INDEX(ts) +); + +CREATE FLOW test_wildcard_basic SiNk TO out_basic AS +SELECT + COUNT(*) as wildcard +FROM + input_basic; + +DROP FLOW test_wildcard_basic; + +CREATE FLOW test_wildcard_basic sink TO out_basic AS +SELECT + COUNT(*) as wildcard +FROM + input_basic; + +INSERT INTO + input_basic +VALUES + (23, "2021-07-01 00:00:01.000"), + (24, "2021-07-01 00:00:01.500"); + +-- SQLNESS REPLACE (ADMIN\sFLUSH_FLOW\('\w+'\)\s+\|\n\+-+\+\n\|\s+)[0-9]+\s+\| $1 FLOW_FLUSHED | +ADMIN FLUSH_FLOW('test_wildcard_basic'); + +SELECT wildcard FROM out_basic; + +DROP FLOW test_wildcard_basic; +DROP TABLE out_basic; +DROP TABLE input_basic; + -- test distinct CREATE TABLE distinct_basic ( number INT,
fix
minor fix about count(*)&sink keyword (#5061)
09e0e1b246eb55dd3aa05d6cedfb58174f28ea43
2024-06-07 09:17:40
Weny Xu
chore: run fuzz tests with kafka remote wal (#4105)
false
diff --git a/.github/actions/setup-greptimedb-cluster/action.yml b/.github/actions/setup-greptimedb-cluster/action.yml index eaf0032c7715..8fc5acf78262 100644 --- a/.github/actions/setup-greptimedb-cluster/action.yml +++ b/.github/actions/setup-greptimedb-cluster/action.yml @@ -22,6 +22,9 @@ inputs: etcd-endpoints: default: "etcd.etcd-cluster.svc.cluster.local:2379" description: "Etcd endpoints" + values-filename: + default: "with-minio.yaml" + runs: using: composite @@ -57,7 +60,7 @@ runs: greptime/greptimedb-cluster \ --create-namespace \ -n my-greptimedb \ - --values ./.github/actions/setup-greptimedb-cluster/values.yaml \ + --values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \ --wait \ --wait-for-jobs - name: Wait for GreptimeDB diff --git a/.github/actions/setup-greptimedb-cluster/values.yaml b/.github/actions/setup-greptimedb-cluster/with-disk.yaml similarity index 93% rename from .github/actions/setup-greptimedb-cluster/values.yaml rename to .github/actions/setup-greptimedb-cluster/with-disk.yaml index b7ac1eb86e17..2b5b85547651 100644 --- a/.github/actions/setup-greptimedb-cluster/values.yaml +++ b/.github/actions/setup-greptimedb-cluster/with-disk.yaml @@ -15,4 +15,4 @@ frontend: [runtime] read_rt_size = 8 write_rt_size = 8 - bg_rt_size = 8 \ No newline at end of file + bg_rt_size = 8 diff --git a/.github/actions/setup-greptimedb-cluster/with-minio.yaml b/.github/actions/setup-greptimedb-cluster/with-minio.yaml new file mode 100644 index 000000000000..d5ddcddba56a --- /dev/null +++ b/.github/actions/setup-greptimedb-cluster/with-minio.yaml @@ -0,0 +1,34 @@ +meta: + config: |- + [runtime] + read_rt_size = 8 + write_rt_size = 8 + bg_rt_size = 8 + + [datanode] + [datanode.client] + timeout = "60s" +datanode: + config: |- + [runtime] + read_rt_size = 8 + write_rt_size = 8 + bg_rt_size = 8 +frontend: + config: |- + [runtime] + read_rt_size = 8 + write_rt_size = 8 + bg_rt_size = 8 + + [meta_client] + ddl_timeout = "60s" +objectStorage: + s3: + bucket: default + region: us-west-2 + root: test-root + endpoint: http://minio.minio.svc.cluster.local + credentials: + accessKeyId: rootuser + secretAccessKey: rootpass123 diff --git a/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml b/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml new file mode 100644 index 000000000000..bf4d3da65c7b --- /dev/null +++ b/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml @@ -0,0 +1,45 @@ +meta: + config: |- + [runtime] + read_rt_size = 8 + write_rt_size = 8 + bg_rt_size = 8 + + [wal] + provider = "kafka" + broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"] + num_topics = 3 + + + [datanode] + [datanode.client] + timeout = "60s" +datanode: + config: |- + [runtime] + read_rt_size = 8 + write_rt_size = 8 + bg_rt_size = 8 + + [wal] + provider = "kafka" + broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"] + linger = "2ms" +frontend: + config: |- + [runtime] + read_rt_size = 8 + write_rt_size = 8 + bg_rt_size = 8 + + [meta_client] + ddl_timeout = "60s" +objectStorage: + s3: + bucket: default + region: us-west-2 + root: test-root + endpoint: http://minio.minio.svc.cluster.local + credentials: + accessKeyId: rootuser + secretAccessKey: rootpass123 diff --git a/.github/actions/setup-kafka-cluster/action.yml b/.github/actions/setup-kafka-cluster/action.yml new file mode 100644 index 000000000000..b8a73394235a --- /dev/null +++ b/.github/actions/setup-kafka-cluster/action.yml @@ -0,0 +1,24 @@ +name: Setup Kafka cluster 
+description: Deploy Kafka cluster on Kubernetes +inputs: + controller-replicas: + default: 3 + description: "Kafka controller replicas" + namespace: + default: "kafka-cluster" + +runs: + using: composite + steps: + - name: Install Kafka cluster + shell: bash + run: | + helm upgrade \ + --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \ + --set controller.replicaCount=${{ inputs.controller-replicas }} \ + --set controller.resources.requests.cpu=50m \ + --set controller.resources.requests.memory=128Mi \ + --set listeners.controller.protocol=PLAINTEXT \ + --set listeners.client.protocol=PLAINTEXT \ + --create-namespace \ + -n ${{ inputs.namespace }} diff --git a/.github/actions/setup-minio/action.yml b/.github/actions/setup-minio/action.yml new file mode 100644 index 000000000000..89fbe28baa7e --- /dev/null +++ b/.github/actions/setup-minio/action.yml @@ -0,0 +1,24 @@ +name: Setup Minio cluster +description: Deploy Minio cluster on Kubernetes +inputs: + replicas: + default: 1 + description: "replicas" + +runs: + using: composite + steps: + - name: Install Etcd cluster + shell: bash + run: | + helm repo add minio https://charts.min.io/ + helm upgrade --install minio \ + --set resources.requests.memory=128Mi \ + --set replicas=${{ inputs.replicas }} \ + --set mode=standalone \ + --set rootUser=rootuser,rootPassword=rootpass123 \ + --set buckets[0].name=default \ + --set service.port=80,service.targetPort=9000 \ + minio/minio \ + --create-namespace \ + -n minio diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index 52975fa746ca..e12c17b7eb80 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -155,7 +155,6 @@ jobs: with: # Shares across multiple jobs shared-key: "fuzz-test-targets" - cache-targets: "false" - name: Set Rust Fuzz shell: bash run: | @@ -203,7 +202,6 @@ jobs: with: # Shares across multiple jobs shared-key: "fuzz-test-targets" - cache-targets: "false" - name: Set Rust Fuzz shell: bash run: | @@ -277,16 +275,35 @@ jobs: version: current distributed-fuzztest: - name: Fuzz Test (Distributed, Disk) + name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }}) runs-on: ubuntu-latest needs: build-greptime-ci strategy: matrix: target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ] + mode: + - name: "Disk" + minio: false + kafka: false + values: "with-disk.yaml" + - name: "Minio" + minio: true + kafka: false + values: "with-minio.yaml" + - name: "Remote WAL" + minio: true + kafka: true + values: "with-remote-wal.yaml" steps: - uses: actions/checkout@v4 - name: Setup Kind uses: ./.github/actions/setup-kind + - if: matrix.mode.minio + name: Setup Minio + uses: ./.github/actions/setup-minio + - if: matrix.mode.kafka + name: Setup Kafka cluser + uses: ./.github/actions/setup-kafka-cluster - name: Setup Etcd cluser uses: ./.github/actions/setup-etcd-cluster # Prepares for fuzz tests @@ -301,7 +318,6 @@ jobs: with: # Shares across multiple jobs shared-key: "fuzz-test-targets" - cache-targets: "false" - name: Set Rust Fuzz shell: bash run: | @@ -327,6 +343,22 @@ jobs: pod -l app.kubernetes.io/instance=etcd \ --timeout=120s \ -n etcd-cluster + - if: matrix.mode.minio + name: Wait for minio + run: | + kubectl wait \ + --for=condition=Ready \ + pod -l app=minio \ + --timeout=120s \ + -n minio + - if: matrix.mode.kafka + name: Wait for kafka + run: | + kubectl wait \ + --for=condition=Ready \ + 
pod -l app.kubernetes.io/instance=kafka \ + --timeout=120s \ + -n kafka-cluster - name: Print etcd info shell: bash run: kubectl get all --show-labels -n etcd-cluster @@ -335,6 +367,7 @@ jobs: uses: ./.github/actions/setup-greptimedb-cluster with: image-registry: localhost:5001 + values-filename: ${{ matrix.mode.values }} - name: Port forward (mysql) run: | kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb& @@ -360,7 +393,7 @@ jobs: if: failure() uses: actions/upload-artifact@v4 with: - name: fuzz-tests-kind-logs-${{ matrix.target }} + name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }} path: /tmp/kind retention-days: 3 - name: Delete cluster diff --git a/src/meta-client/src/lib.rs b/src/meta-client/src/lib.rs index f7329b4fb643..fb340c2f4adc 100644 --- a/src/meta-client/src/lib.rs +++ b/src/meta-client/src/lib.rs @@ -21,71 +21,37 @@ pub mod error; // Options for meta client in datanode instance. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(default)] pub struct MetaClientOptions { pub metasrv_addrs: Vec<String>, - #[serde(default = "default_timeout")] #[serde(with = "humantime_serde")] pub timeout: Duration, - #[serde(default = "default_heartbeat_timeout")] #[serde(with = "humantime_serde")] pub heartbeat_timeout: Duration, - #[serde(default = "default_ddl_timeout")] #[serde(with = "humantime_serde")] pub ddl_timeout: Duration, - #[serde(default = "default_connect_timeout")] #[serde(with = "humantime_serde")] pub connect_timeout: Duration, pub tcp_nodelay: bool, - #[serde(default = "default_metadata_cache_max_capacity")] pub metadata_cache_max_capacity: u64, - #[serde(default = "default_metadata_cache_ttl")] #[serde(with = "humantime_serde")] pub metadata_cache_ttl: Duration, - #[serde(default = "default_metadata_cache_tti")] #[serde(with = "humantime_serde")] pub metadata_cache_tti: Duration, } -fn default_heartbeat_timeout() -> Duration { - Duration::from_millis(500u64) -} - -fn default_ddl_timeout() -> Duration { - Duration::from_millis(10_000u64) -} - -fn default_connect_timeout() -> Duration { - Duration::from_millis(1_000u64) -} - -fn default_timeout() -> Duration { - Duration::from_millis(3_000u64) -} - -fn default_metadata_cache_max_capacity() -> u64 { - 100_000u64 -} - -fn default_metadata_cache_ttl() -> Duration { - Duration::from_secs(600u64) -} - -fn default_metadata_cache_tti() -> Duration { - Duration::from_secs(300u64) -} - impl Default for MetaClientOptions { fn default() -> Self { Self { metasrv_addrs: vec!["127.0.0.1:3002".to_string()], - timeout: default_timeout(), - heartbeat_timeout: default_heartbeat_timeout(), - ddl_timeout: default_ddl_timeout(), - connect_timeout: default_connect_timeout(), + timeout: Duration::from_millis(3_000u64), + heartbeat_timeout: Duration::from_millis(500u64), + ddl_timeout: Duration::from_millis(10_000u64), + connect_timeout: Duration::from_millis(1_000u64), tcp_nodelay: true, - metadata_cache_max_capacity: default_metadata_cache_max_capacity(), - metadata_cache_ttl: default_metadata_cache_ttl(), - metadata_cache_tti: default_metadata_cache_tti(), + metadata_cache_max_capacity: 100_000u64, + metadata_cache_ttl: Duration::from_secs(600u64), + metadata_cache_tti: Duration::from_secs(300u64), } } }
chore
run fuzz tests with kafka remote wal (#4105)
d363c8ee3c21ea45ca9dea28432cc64891ce4c77
2025-02-28 12:16:48
Ruihang Xia
fix: check physical region before use (#5612)
false
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs index c7e5ffde98cc..e08a1c5e78a4 100644 --- a/src/metric-engine/src/engine/create.rs +++ b/src/metric-engine/src/engine/create.rs @@ -162,15 +162,38 @@ impl MetricEngineInner { let physical_region_id = validate_create_logical_regions(&requests)?; let data_region_id = utils::to_data_region_id(physical_region_id); + ensure!( + self.state + .read() + .unwrap() + .exist_physical_region(data_region_id), + PhysicalRegionNotFoundSnafu { + region_id: data_region_id, + } + ); + // Filters out the requests that the logical region already exists let requests = { let state = self.state.read().unwrap(); - let logical_region_exists = state.logical_region_exists_filter(data_region_id); - // TODO(weny): log the skipped logical regions - requests - .into_iter() - .filter(|(region_id, _)| !logical_region_exists(region_id)) - .collect::<Vec<_>>() + let mut skipped = Vec::with_capacity(requests.len()); + let mut kept_requests = Vec::with_capacity(requests.len()); + + for (region_id, request) in requests { + if state.is_logical_region_exist(region_id) { + skipped.push(region_id); + } else { + kept_requests.push((region_id, request)); + } + } + + // log skipped regions + if !skipped.is_empty() { + info!( + "Skipped creating logical regions {skipped:?} because they already exist", + skipped = skipped + ); + } + kept_requests }; // Finds new columns to add to physical region diff --git a/src/metric-engine/src/engine/state.rs b/src/metric-engine/src/engine/state.rs index 42975e83e643..19d15acbb8e9 100644 --- a/src/metric-engine/src/engine/state.rs +++ b/src/metric-engine/src/engine/state.rs @@ -83,18 +83,6 @@ pub(crate) struct MetricEngineState { } impl MetricEngineState { - pub fn logical_region_exists_filter( - &self, - physical_region_id: RegionId, - ) -> impl for<'a> Fn(&'a RegionId) -> bool + use<'_> { - let state = self - .physical_region_states() - .get(&physical_region_id) - .unwrap(); - - move |logical_region_id| state.logical_regions().contains(logical_region_id) - } - pub fn add_physical_region( &mut self, physical_region_id: RegionId,
fix
check physical region before use (#5612)
3a996c2f00ef20b434f60c053cdb691c2a6c05f5
2025-01-10 22:35:10
yihong
feat: add set search_path to 'xxx' for pg (#5342)
false
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs index 2d4b2081bc8c..fdc5dc475775 100644 --- a/src/operator/src/statement.rs +++ b/src/operator/src/statement.rs @@ -72,7 +72,9 @@ use table::table_name::TableName; use table::table_reference::TableReference; use table::TableRef; -use self::set::{set_bytea_output, set_datestyle, set_timezone, validate_client_encoding}; +use self::set::{ + set_bytea_output, set_datestyle, set_search_path, set_timezone, validate_client_encoding, +}; use crate::error::{ self, CatalogSnafu, ExecLogicalPlanSnafu, ExternalSnafu, InvalidSqlSnafu, NotSupportedSnafu, PlanStatementSnafu, Result, SchemaNotFoundSnafu, StatementTimeoutSnafu, @@ -408,6 +410,16 @@ impl StatementExecutor { .fail(); } } + "SEARCH_PATH" => { + if query_ctx.channel() == Channel::Postgres { + set_search_path(set_var.value, query_ctx)? + } else { + return NotSupportedSnafu { + feat: format!("Unsupported set variable {}", var_name), + } + .fail(); + } + } _ => { // for postgres, we give unknown SET statements a warning with // success, this is prevent the SET call becoming a blocker diff --git a/src/operator/src/statement/set.rs b/src/operator/src/statement/set.rs index 7b26b7f794d2..6211f1a55482 100644 --- a/src/operator/src/statement/set.rs +++ b/src/operator/src/statement/set.rs @@ -81,6 +81,26 @@ pub fn set_bytea_output(exprs: Vec<Expr>, ctx: QueryContextRef) -> Result<()> { Ok(()) } +pub fn set_search_path(exprs: Vec<Expr>, ctx: QueryContextRef) -> Result<()> { + let search_expr = exprs.first().context(NotSupportedSnafu { + feat: "No search path find in set variable statement", + })?; + match search_expr { + Expr::Value(Value::SingleQuotedString(search_path)) + | Expr::Value(Value::DoubleQuotedString(search_path)) => { + ctx.set_current_schema(&search_path.clone()); + Ok(()) + } + expr => NotSupportedSnafu { + feat: format!( + "Unsupported search path expr {} in set variable statement", + expr + ), + } + .fail(), + } +} + pub fn validate_client_encoding(set: SetVariables) -> Result<()> { let Some((encoding, [])) = set.value.split_first() else { return InvalidSqlSnafu { diff --git a/tests/cases/standalone/common/system/pg_catalog.result b/tests/cases/standalone/common/system/pg_catalog.result index 5cefa8c8b28c..d43c707bcc4a 100644 --- a/tests/cases/standalone/common/system/pg_catalog.result +++ b/tests/cases/standalone/common/system/pg_catalog.result @@ -33,6 +33,34 @@ show search_path; | public | +-------------+ +-- set search_path for pg using schema for now FIXME when support real search_path +create database test; + +Affected Rows: 1 + +-- SQLNESS PROTOCOL POSTGRES +set search_path to 'test'; + +Affected Rows: 0 + +drop database test; + +Affected Rows: 0 + +-- SQLNESS PROTOCOL POSTGRES +set search_path to 'public'; + +Affected Rows: 0 + +-- SQLNESS PROTOCOL POSTGRES +select current_schema(); + ++------------------+ +| current_schema() | ++------------------+ +| public | ++------------------+ + -- make sure all the pg_catalog tables are only visible to postgres select * from pg_catalog.pg_class; diff --git a/tests/cases/standalone/common/system/pg_catalog.sql b/tests/cases/standalone/common/system/pg_catalog.sql index 03c4968d5dec..2a8815cd32c0 100644 --- a/tests/cases/standalone/common/system/pg_catalog.sql +++ b/tests/cases/standalone/common/system/pg_catalog.sql @@ -13,6 +13,17 @@ select current_schema(); -- SQLNESS PROTOCOL POSTGRES show search_path; +-- set search_path for pg using schema for now FIXME when support real search_path +create database test; +-- 
SQLNESS PROTOCOL POSTGRES +set search_path to 'test'; +drop database test; +-- SQLNESS PROTOCOL POSTGRES +set search_path to 'public'; + +-- SQLNESS PROTOCOL POSTGRES +select current_schema(); + -- make sure all the pg_catalog tables are only visible to postgres select * from pg_catalog.pg_class; select * from pg_catalog.pg_namespace;
feat
add set search_path to 'xxx' for pg (#5342)
e352fb449504ece91b28760af2005b204cb0db31
2023-09-26 17:42:08
Ruihang Xia
fix: check for table scan before expanding (#2491)
false
diff --git a/src/query/src/dist_plan/analyzer.rs b/src/query/src/dist_plan/analyzer.rs index 45c77edb81a1..7807913e74b5 100644 --- a/src/query/src/dist_plan/analyzer.rs +++ b/src/query/src/dist_plan/analyzer.rs @@ -14,7 +14,6 @@ use std::sync::Arc; -use common_telemetry::info; use datafusion::datasource::DefaultTableSource; use datafusion::error::Result as DfResult; use datafusion_common::config::ConfigOptions; @@ -46,7 +45,9 @@ impl AnalyzerRule for DistPlannerAnalyzer { ) -> datafusion_common::Result<LogicalPlan> { let plan = plan.transform(&Self::inspect_plan_with_subquery)?; let mut rewriter = PlanRewriter::default(); - plan.rewrite(&mut rewriter) + let result = plan.rewrite(&mut rewriter)?; + + Ok(result) } } @@ -138,10 +139,6 @@ impl PlanRewriter { /// Return true if should stop and expand. The input plan is the parent node of current node fn should_expand(&mut self, plan: &LogicalPlan) -> bool { if DFLogicalSubstraitConvertor.encode(plan).is_err() { - info!( - "substrait error: {:?}", - DFLogicalSubstraitConvertor.encode(plan) - ); return true; } @@ -251,6 +248,13 @@ impl TreeNodeRewriter for PlanRewriter { return Ok(node); } + // only expand when the leaf is table scan + if node.inputs().is_empty() && !matches!(node, LogicalPlan::TableScan(_)) { + self.set_expanded(); + self.pop_stack(); + return Ok(node); + } + self.maybe_set_partitions(&node); let Some(parent) = self.get_parent() else { diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs index a8efea7da026..f8e81230eb3c 100644 --- a/tests-integration/tests/sql.rs +++ b/tests-integration/tests/sql.rs @@ -54,13 +54,10 @@ macro_rules! sql_tests { $service, test_mysql_auth, - // ignore: https://github.com/GreptimeTeam/greptimedb/issues/2445 - // test_mysql_crud, + test_mysql_crud, test_postgres_auth, - // ignore: https://github.com/GreptimeTeam/greptimedb/issues/2445 - // test_postgres_crud, - // ignore: https://github.com/GreptimeTeam/greptimedb/issues/2445 - // test_postgres_parameter_inference, + test_postgres_crud, + test_postgres_parameter_inference, ); )* }; @@ -123,7 +120,6 @@ pub async fn test_mysql_auth(store_type: StorageType) { guard.remove_all().await; } -#[allow(dead_code)] pub async fn test_mysql_crud(store_type: StorageType) { common_telemetry::init_default_ut_logging(); @@ -135,12 +131,12 @@ pub async fn test_mysql_crud(store_type: StorageType) { .await .unwrap(); - assert!(sqlx::query( + sqlx::query( "create table demo(i bigint, ts timestamp time index, d date, dt datetime, b blob)", ) .execute(&pool) .await - .is_ok()); + .unwrap(); for i in 0..10 { let dt: DateTime<Utc> = DateTime::from_naive_utc_and_offset( NaiveDateTime::from_timestamp_opt(60, i).unwrap(), @@ -149,7 +145,7 @@ pub async fn test_mysql_crud(store_type: StorageType) { let d = NaiveDate::from_yo_opt(2015, 100).unwrap(); let hello = format!("hello{i}"); let bytes = hello.as_bytes(); - assert!(sqlx::query("insert into demo values(?, ?, ?, ?, ?)") + sqlx::query("insert into demo values(?, ?, ?, ?, ?)") .bind(i) .bind(i) .bind(d) @@ -157,7 +153,7 @@ pub async fn test_mysql_crud(store_type: StorageType) { .bind(bytes) .execute(&pool) .await - .is_ok()); + .unwrap(); } let rows = sqlx::query("select i, d, dt, b from demo") @@ -270,7 +266,6 @@ pub async fn test_postgres_auth(store_type: StorageType) { guard.remove_all().await; } -#[allow(dead_code)] pub async fn test_postgres_crud(store_type: StorageType) { let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_crud").await; @@ -347,7 +342,6 @@ pub async fn 
test_postgres_crud(store_type: StorageType) { guard.remove_all().await; } -#[allow(dead_code)] pub async fn test_postgres_parameter_inference(store_type: StorageType) { let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_inference").await;
fix
check for table scan before expanding (#2491)
20b7f907b2ea28668e422ae1e5744510c7bdfd56
2023-08-25 08:37:44
Ruihang Xia
fix: promql planner should clear its states on each selector (#2247)
false
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs index bf761dca433f..bf8c8eeb6c4d 100644 --- a/src/promql/src/planner.rs +++ b/src/promql/src/planner.rs @@ -98,6 +98,16 @@ impl PromPlannerContext { ..Default::default() } } + + /// Reset all planner states + fn reset(&mut self) { + self.table_name = None; + self.time_index_column = None; + self.field_columns = vec![]; + self.tag_columns = vec![]; + self.field_column_matcher = None; + self.range = None; + } } pub struct PromPlanner { @@ -473,6 +483,9 @@ impl PromPlanner { /// Extract metric name from `__name__` matcher and set it into [PromPlannerContext]. /// Returns a new [Matchers] that doesn't contains metric name matcher. /// + /// Each call to this function means new selector is started. Thus the context will be reset + /// at first. + /// /// Name rule: /// - if `name` is some, then the matchers MUST NOT contains `__name__` matcher. /// - if `name` is none, then the matchers MAY contains NONE OR MULTIPLE `__name__` matchers. @@ -481,6 +494,7 @@ impl PromPlanner { label_matchers: &Matchers, name: &Option<String>, ) -> Result<Matchers> { + self.ctx.reset(); let metric_name; if let Some(name) = name.clone() { metric_name = Some(name); @@ -600,7 +614,14 @@ impl PromPlanner { let _ = result_set.remove(&col); } - self.ctx.field_columns = result_set.iter().cloned().collect(); + // mask the field columns in context using computed result set + self.ctx.field_columns = self + .ctx + .field_columns + .drain(..) + .filter(|col| result_set.contains(col)) + .collect(); + let exprs = result_set .into_iter() .map(|col| DfExpr::Column(col.into())) diff --git a/tests/cases/standalone/common/tql/binary_operator.result b/tests/cases/standalone/common/tql/binary_operator.result index bce78de11c10..4f0a022c483d 100644 --- a/tests/cases/standalone/common/tql/binary_operator.result +++ b/tests/cases/standalone/common/tql/binary_operator.result @@ -37,3 +37,52 @@ drop table data; Affected Rows: 1 +-- Binary operator on table with multiple field columns +create table data (ts timestamp time index, val1 double, val2 double, val3 double); + +Affected Rows: 0 + +insert into data values (0, 1, 100, 10000), (10000, 2, 200, 20000), (20000, 3, 300, 30000); + +Affected Rows: 3 + +-- SQLNESS SORT_RESULT 3 1 +tql eval (0, 30, '10s'), data / data; + ++---------------------+-----------------------+-----------------------+-----------------------+ +| ts | data.val1 / data.val1 | data.val2 / data.val2 | data.val3 / data.val3 | ++---------------------+-----------------------+-----------------------+-----------------------+ +| 1970-01-01T00:00:00 | 1.0 | 1.0 | 1.0 | +| 1970-01-01T00:00:10 | 1.0 | 1.0 | 1.0 | +| 1970-01-01T00:00:20 | 1.0 | 1.0 | 1.0 | +| 1970-01-01T00:00:30 | 1.0 | 1.0 | 1.0 | ++---------------------+-----------------------+-----------------------+-----------------------+ + +-- SQLNESS SORT_RESULT 3 1 +tql eval (0, 30, '10s'), data{__field__="val1"} + data{__field__="val2"}; + ++---------------------+-----------------------+ +| ts | data.val1 + data.val2 | ++---------------------+-----------------------+ +| 1970-01-01T00:00:00 | 101.0 | +| 1970-01-01T00:00:10 | 202.0 | +| 1970-01-01T00:00:20 | 303.0 | +| 1970-01-01T00:00:30 | 303.0 | ++---------------------+-----------------------+ + +-- SQLNESS SORT_RESULT 3 1 +tql eval (0, 30, '10s'), data{__field__="val1", __field__="val2"} + data{__field__="val2", __field__="val3"}; + ++---------------------+-----------------------+-----------------------+ +| ts | data.val1 + data.val2 | data.val2 + 
data.val3 | ++---------------------+-----------------------+-----------------------+ +| 1970-01-01T00:00:00 | 101.0 | 10100.0 | +| 1970-01-01T00:00:10 | 202.0 | 20200.0 | +| 1970-01-01T00:00:20 | 303.0 | 30300.0 | +| 1970-01-01T00:00:30 | 303.0 | 30300.0 | ++---------------------+-----------------------+-----------------------+ + +drop table data; + +Affected Rows: 1 + diff --git a/tests/cases/standalone/common/tql/binary_operator.sql b/tests/cases/standalone/common/tql/binary_operator.sql index 82c2682fbfb9..2b749711ad91 100644 --- a/tests/cases/standalone/common/tql/binary_operator.sql +++ b/tests/cases/standalone/common/tql/binary_operator.sql @@ -9,3 +9,20 @@ tql eval (0, 30, '10s'), data + (1 < bool 2); tql eval (0, 30, '10s'), data + (1 > bool 2); drop table data; + +-- Binary operator on table with multiple field columns + +create table data (ts timestamp time index, val1 double, val2 double, val3 double); + +insert into data values (0, 1, 100, 10000), (10000, 2, 200, 20000), (20000, 3, 300, 30000); + +-- SQLNESS SORT_RESULT 3 1 +tql eval (0, 30, '10s'), data / data; + +-- SQLNESS SORT_RESULT 3 1 +tql eval (0, 30, '10s'), data{__field__="val1"} + data{__field__="val2"}; + +-- SQLNESS SORT_RESULT 3 1 +tql eval (0, 30, '10s'), data{__field__="val1", __field__="val2"} + data{__field__="val2", __field__="val3"}; + +drop table data;
fix
promql planner should clear its states on each selector (#2247)
863ee608ca8e49c0d7e82d21e66e5125a8714040
2024-04-30 11:50:23
shuiyisong
chore: adding Grafana config for cluster monitor (#3781)
false
diff --git a/grafana/README.md b/grafana/README.md index 0fc62f388b9b..264ee23c5771 100644 --- a/grafana/README.md +++ b/grafana/README.md @@ -7,4 +7,60 @@ Status notify: we are still working on this config. It's expected to change freq # How to use +## `greptimedb.json` + Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file. + +## `greptimedb-cluster.json` + +This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors. + +We use Prometheus to scrape off metrics from nodes in GreptimeDB cluster, Grafana to visualize the diagram. Any compatible stack should work too. + +__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcomed. + +### Configuration + +Please ensure the following configuration before importing the dashboard into Grafana. + +__1. Prometheus scrape config__ + +Assign `greptime_pod` label to each host target. We use this label to identify each node instance. + +```yml +# example config +# only to indicate how to assign labels to each target +# modify yours accordingly +scrape_configs: + - job_name: metasrv + static_configs: + - targets: ['<ip>:<port>'] + labels: + greptime_pod: metasrv + + - job_name: datanode + static_configs: + - targets: ['<ip>:<port>'] + labels: + greptime_pod: datanode1 + - targets: ['<ip>:<port>'] + labels: + greptime_pod: datanode2 + - targets: ['<ip>:<port>'] + labels: + greptime_pod: datanode3 + + - job_name: frontend + static_configs: + - targets: ['<ip>:<port>'] + labels: + greptime_pod: frontend +``` + +__2. Grafana config__ + +Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in Grafana dashboard so that multiple environments are supported. + +### Usage + +Use `datasource` or `greptime_pod` on the upper-left corner to filter data from certain node. 
diff --git a/grafana/greptimedb-cluster.json b/grafana/greptimedb-cluster.json new file mode 100644 index 000000000000..1e2473fda302 --- /dev/null +++ b/grafana/greptimedb-cluster.json @@ -0,0 +1,4862 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 155, + "panels": [], + "title": "Frontend Entry Middleware", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 152, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{path}}-{{code}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + 
"fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{path}}-{{code}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC middleware", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 154, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{greptime_pod=~\"$greptime_pod\",path!~\"/health|/metrics\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{path}}-{{method}}-{{code}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{greptime_pod=~\"$greptime_pod\",path!~\"/health|/metrics\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{path}}-{{method}}-{{code}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP middleware", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 156, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "MySQL per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": 
{ + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 157, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "PostgreSQL per DB", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 158, + "panels": [], + "title": "Frontend HTTP per DB", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" 
+ }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 18 + }, + "id": 159, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_sql_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_sql_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP sql per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 18 + }, + 
"id": 160, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_promql_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_promql_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP promql per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 18 + }, + "id": 161, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) 
(rate(greptime_servers_http_influxdb_write_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_influxdb_write_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP influxdb per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 26 + }, + "id": 162, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_prometheus_write_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) 
(rate(greptime_servers_http_prometheus_write_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP prom store write per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 26 + }, + "id": 183, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_prometheus_read_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_prometheus_read_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP prom store read per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + 
"mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 26 + }, + "id": 184, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_otlp_metrics_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_otlp_metrics_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP otlp metrics per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", 
+ "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 26 + }, + "id": 185, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_otlp_traces_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_otlp_traces_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP otlp traces per DB", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 163, + "panels": [], + "title": "Frontend gRPC per DB", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + 
"value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 164, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "label_replace(histogram_quantile(0.99, sum by(greptime_pod, le, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))), \"db\", \"$1\", \"db\", \"(.*)-public\")", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-{{type}}-{{code}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-{{type}}-{{code}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } 
+ ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 165, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_grpc_prom_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_grpc_prom_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC prom per DB", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 166, + "panels": [], + "title": "Frontend-Datanode Call", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-rps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "rowsps" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 44 + }, + "id": 186, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "rate(greptime_table_operator_ingest_rows{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-rps", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "ingested rows", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 44 + }, + "id": 167, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, request_type) (rate(greptime_grpc_region_request_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{request_type}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, request_type) (rate(greptime_grpc_region_request_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{request_type}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC region call", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 168, + "panels": [], + "title": "Datanode Mito", + "type": 
"row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "points" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 53 + }, + "id": 188, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{type}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, type) (rate(greptime_mito_handle_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{type}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "handle request", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + 
"showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 187, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "greptime_mito_write_buffer_bytes{greptime_pod=~\"$greptime_pod\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Write buffer bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-bytes" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "points" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 61 + }, + "id": 169, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, reason) (rate(greptime_mito_flush_requests_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{reason}}-success", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + 
"exemplar": false, + "expr": "sum by(greptime_pod, reason) (rate(greptime_mito_flush_errors_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{reason}}-error", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod) (rate(greptime_mito_flush_bytes_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-bytes", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "flush total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 61 + }, + "id": 170, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{stage}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, stage) (rate(greptime_mito_write_stage_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": 
false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{stage}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "write stage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 69 + }, + "id": 172, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod) (rate(greptime_mito_compaction_total_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "compaction total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 69 + }, + "id": 171, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{stage}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{stage}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "compaction stage", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 77 + }, + "id": 173, + "panels": [], + "title": "OpenDAL", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { 
+ "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 78 + }, + "id": 178, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\",operation=\"read\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"read\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_READ", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + 
"op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 78 + }, + "id": 179, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"write\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"write\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_WRITE", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 78 + }, + "id": 180, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + 
}, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"list\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"list\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_LIST", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 78 + }, + "id": 182, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"stat\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + 
"disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"stat\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_STAT", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 86 + }, + "id": 181, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation!~\"read|write|list\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation!~\"read|write|list\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + 
"title": "requests_duration_seconds", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-bytes" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 86 + }, + "id": 177, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme, operation) (rate(opendal_bytes_total_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_bytes_total_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-bytes", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "bytes_total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", 
+ "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 86 + }, + "id": 176, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_requests_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_total", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(greptime_pod)", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "greptime_pod", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(greptime_pod)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "GreptimeDB-Cluster", + "uid": "ea35efe5-918e-44fa-9743-e9aa1a340a3f", + "version": 9, + "weekStart": "" + } \ No newline at end of file
chore
adding Grafana config for cluster monitor (#3781)
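The dashboard panels in the diff above compute p99 latencies with `histogram_quantile(0.99, sum by(le, stage) (rate(..._bucket[$__rate_interval])))` and pair them with per-stage QPS series. As a rough illustration of the kind of instrumentation those panels expect, the sketch below registers a stage-labelled Prometheus histogram in Rust. The metric name is copied from the queries above, but the registration code, the `once_cell`/`prometheus` crate usage, and the `"write_wal"` stage value are assumptions for illustration only; they do not reflect how GreptimeDB actually defines or labels its metrics.

```Rust
use once_cell::sync::Lazy;
use prometheus::{register_histogram_vec, HistogramVec};

// A stage-labelled histogram whose buckets back a
// `histogram_quantile(0.99, ...)` panel like the ones in the dashboard JSON.
static WRITE_STAGE_ELAPSED: Lazy<HistogramVec> = Lazy::new(|| {
    register_histogram_vec!(
        "greptime_mito_write_stage_elapsed",
        "Elapsed time of each write stage (illustrative help text)",
        &["stage"] // hypothetical label set; the real metric may differ
    )
    .unwrap()
});

fn main() {
    let start = std::time::Instant::now();
    // ... perform one stage of the write path ...
    WRITE_STAGE_ELAPSED
        .with_label_values(&["write_wal"]) // "write_wal" is a made-up stage name
        .observe(start.elapsed().as_secs_f64());
}
```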
0c829a971264c1e6c93b22f5428a520309531c0b
2022-10-10 12:38:26
Ruihang Xia
chore: ignore vscode config directory in git (#299)
false
diff --git a/.gitignore b/.gitignore index 9670cc7f2298..304df68683bb 100644 --- a/.gitignore +++ b/.gitignore @@ -19,12 +19,12 @@ debug/ # JetBrains IDE config directory .idea/ +# VSCode IDE config directory +.vscode/ + # Logs **/__unittest_logs logs/ -.DS_store -.gitignore - # cpython's generated python byte code **/__pycache__/
chore
ignore vscode config directory in git (#299)
f696f41a02907cd6ff8275b69ea7630206435179
2024-05-17 13:08:35
Weny Xu
fix: prevent registering logical regions with AliveKeeper (#3965)
false
diff --git a/src/client/src/error.rs b/src/client/src/error.rs index 5add743ce31d..29197450b62d 100644 --- a/src/client/src/error.rs +++ b/src/client/src/error.rs @@ -18,7 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt}; use common_error::status_code::StatusCode; use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG}; use common_macro::stack_trace_debug; -use snafu::{Location, Snafu}; +use snafu::{location, Location, Snafu}; use tonic::{Code, Status}; #[derive(Snafu)] @@ -83,14 +83,28 @@ pub enum Error { }, #[snafu(display("Failed to request RegionServer, code: {}", code))] - RegionServer { code: Code, source: BoxedError }, + RegionServer { + code: Code, + source: BoxedError, + #[snafu(implicit)] + location: Location, + }, // Server error carried in Tonic Status's metadata. #[snafu(display("{}", msg))] - Server { code: StatusCode, msg: String }, + Server { + code: StatusCode, + msg: String, + #[snafu(implicit)] + location: Location, + }, #[snafu(display("Illegal Database response: {err_msg}"))] - IllegalDatabaseResponse { err_msg: String }, + IllegalDatabaseResponse { + err_msg: String, + #[snafu(implicit)] + location: Location, + }, #[snafu(display("Failed to send request with streaming: {}", err_msg))] ClientStreaming { @@ -148,7 +162,11 @@ impl From<Status> for Error { let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG) .unwrap_or_else(|| e.message().to_string()); - Self::Server { code, msg } + Self::Server { + code, + msg, + location: location!(), + } } } diff --git a/src/client/src/region.rs b/src/client/src/region.rs index e6c6e4af81a4..f00bb4265c1a 100644 --- a/src/client/src/region.rs +++ b/src/client/src/region.rs @@ -189,6 +189,7 @@ impl RegionRequester { error::Error::RegionServer { code, source: BoxedError::new(err), + location: location!(), } })? .into_inner(); @@ -272,7 +273,7 @@ mod test { err_msg: "blabla".to_string(), }), })); - let Server { code, msg } = result.unwrap_err() else { + let Server { code, msg, .. 
} = result.unwrap_err() else { unreachable!() }; assert_eq!(code, StatusCode::Internal); diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index e55ac27dfad0..cc88a3cdcd07 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -516,6 +516,7 @@ mod tests { use common_meta::key::datanode_table::DatanodeTableManager; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::KvBackendRef; + use mito2::engine::MITO_ENGINE_NAME; use store_api::region_request::RegionRequest; use store_api::storage::RegionId; @@ -528,7 +529,7 @@ mod tests { let txn = mgr .build_create_txn( 1028, - "mock", + MITO_ENGINE_NAME, "foo/bar/weny", HashMap::from([("foo".to_string(), "bar".to_string())]), HashMap::default(), @@ -542,8 +543,9 @@ mod tests { #[tokio::test] async fn test_initialize_region_server() { + common_telemetry::init_default_ut_logging(); let mut mock_region_server = mock_region_server(); - let (mock_region, mut mock_region_handler) = MockRegionEngine::new(); + let (mock_region, mut mock_region_handler) = MockRegionEngine::new(MITO_ENGINE_NAME); mock_region_server.register_engine(mock_region.clone()); diff --git a/src/datanode/src/heartbeat/handler/upgrade_region.rs b/src/datanode/src/heartbeat/handler/upgrade_region.rs index 19267a2d4c41..76fa3327b505 100644 --- a/src/datanode/src/heartbeat/handler/upgrade_region.rs +++ b/src/datanode/src/heartbeat/handler/upgrade_region.rs @@ -121,6 +121,7 @@ mod tests { use std::time::Duration; use common_meta::instruction::{InstructionReply, UpgradeRegion}; + use mito2::engine::MITO_ENGINE_NAME; use store_api::region_engine::RegionRole; use store_api::storage::RegionId; use tokio::time::Instant; @@ -133,7 +134,7 @@ mod tests { #[tokio::test] async fn test_region_not_exist() { let mut mock_region_server = mock_region_server(); - let (mock_engine, _) = MockRegionEngine::new(); + let (mock_engine, _) = MockRegionEngine::new(MITO_ENGINE_NAME); mock_region_server.register_engine(mock_engine); let handler_context = HandlerContext { @@ -167,13 +168,14 @@ mod tests { let mock_region_server = mock_region_server(); let region_id = RegionId::new(1024, 1); - let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| { - region_engine.mock_role = Some(Some(RegionRole::Leader)); - region_engine.handle_request_mock_fn = Some(Box::new(|_, _| { - // Should be unreachable. - unreachable!(); - })); - }); + let (mock_engine, _) = + MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| { + region_engine.mock_role = Some(Some(RegionRole::Leader)); + region_engine.handle_request_mock_fn = Some(Box::new(|_, _| { + // Should be unreachable. + unreachable!(); + })); + }); mock_region_server.register_test_region(region_id, mock_engine); let handler_context = HandlerContext { @@ -207,13 +209,14 @@ mod tests { let mock_region_server = mock_region_server(); let region_id = RegionId::new(1024, 1); - let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| { - // Region is not ready. - region_engine.mock_role = Some(Some(RegionRole::Follower)); - region_engine.handle_request_mock_fn = Some(Box::new(|_, _| Ok(0))); - // Note: Don't change. - region_engine.handle_request_delay = Some(Duration::from_secs(100)); - }); + let (mock_engine, _) = + MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| { + // Region is not ready. 
+ region_engine.mock_role = Some(Some(RegionRole::Follower)); + region_engine.handle_request_mock_fn = Some(Box::new(|_, _| Ok(0))); + // Note: Don't change. + region_engine.handle_request_delay = Some(Duration::from_secs(100)); + }); mock_region_server.register_test_region(region_id, mock_engine); let handler_context = HandlerContext { @@ -247,13 +250,14 @@ mod tests { let mock_region_server = mock_region_server(); let region_id = RegionId::new(1024, 1); - let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| { - // Region is not ready. - region_engine.mock_role = Some(Some(RegionRole::Follower)); - region_engine.handle_request_mock_fn = Some(Box::new(|_, _| Ok(0))); - // Note: Don't change. - region_engine.handle_request_delay = Some(Duration::from_millis(300)); - }); + let (mock_engine, _) = + MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| { + // Region is not ready. + region_engine.mock_role = Some(Some(RegionRole::Follower)); + region_engine.handle_request_mock_fn = Some(Box::new(|_, _| Ok(0))); + // Note: Don't change. + region_engine.handle_request_delay = Some(Duration::from_millis(300)); + }); mock_region_server.register_test_region(region_id, mock_engine); let waits = vec![ @@ -308,18 +312,19 @@ mod tests { let mock_region_server = mock_region_server(); let region_id = RegionId::new(1024, 1); - let (mock_engine, _) = MockRegionEngine::with_custom_apply_fn(|region_engine| { - // Region is not ready. - region_engine.mock_role = Some(Some(RegionRole::Follower)); - region_engine.handle_request_mock_fn = Some(Box::new(|_, _| { - error::UnexpectedSnafu { - violated: "mock_error".to_string(), - } - .fail() - })); - // Note: Don't change. - region_engine.handle_request_delay = Some(Duration::from_millis(100)); - }); + let (mock_engine, _) = + MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| { + // Region is not ready. + region_engine.mock_role = Some(Some(RegionRole::Follower)); + region_engine.handle_request_mock_fn = Some(Box::new(|_, _| { + error::UnexpectedSnafu { + violated: "mock_error".to_string(), + } + .fail() + })); + // Note: Don't change. 
+ region_engine.handle_request_delay = Some(Duration::from_millis(100)); + }); mock_region_server.register_test_region(region_id, mock_engine); let handler_context = HandlerContext { diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs index 440f0a3d036d..b5dbbac0b9ee 100644 --- a/src/datanode/src/region_server.rs +++ b/src/datanode/src/region_server.rs @@ -34,6 +34,7 @@ use common_telemetry::{info, warn}; use dashmap::DashMap; use futures_util::future::try_join_all; use metric_engine::engine::MetricEngine; +use mito2::engine::MITO_ENGINE_NAME; use prost::Message; pub use query::dummy_catalog::{ DummyCatalogList, DummyTableProviderFactory, TableProviderFactoryRef, @@ -44,7 +45,9 @@ use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream}; use servers::grpc::region_server::RegionServerHandler; use session::context::{QueryContextBuilder, QueryContextRef}; use snafu::{OptionExt, ResultExt}; -use store_api::metric_engine_consts::{METRIC_ENGINE_NAME, PHYSICAL_TABLE_METADATA_KEY}; +use store_api::metric_engine_consts::{ + FILE_ENGINE_NAME, LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME, +}; use store_api::region_engine::{RegionEngineRef, RegionRole, SetReadonlyResponse}; use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest}; use store_api::storage::RegionId; @@ -403,7 +406,7 @@ impl RegionServerInner { let current_region_status = self.region_map.get(&region_id); let engine = match region_change { - RegionChange::Register(ref engine_type, _) => match current_region_status { + RegionChange::Register(attribute) => match current_region_status { Some(status) => match status.clone() { RegionEngineWithStatus::Registering(_) => { return Ok(CurrentEngine::EarlyReturn(0)) @@ -417,8 +420,10 @@ impl RegionServerInner { .engines .read() .unwrap() - .get(engine_type) - .with_context(|| RegionEngineNotFoundSnafu { name: engine_type })? + .get(attribute.engine()) + .with_context(|| RegionEngineNotFoundSnafu { + name: attribute.engine(), + })? 
.clone(), }, RegionChange::Deregisters => match current_region_status { @@ -461,11 +466,13 @@ impl RegionServerInner { .start_timer(); let region_change = match &request { - RegionRequest::Create(create) => RegionChange::Register(create.engine.clone(), false), + RegionRequest::Create(create) => { + let attribute = parse_region_attribute(&create.engine, &create.options)?; + RegionChange::Register(attribute) + } RegionRequest::Open(open) => { - let is_opening_physical_region = - open.options.contains_key(PHYSICAL_TABLE_METADATA_KEY); - RegionChange::Register(open.engine.clone(), is_opening_physical_region) + let attribute = parse_region_attribute(&open.engine, &open.options)?; + RegionChange::Register(attribute) } RegionRequest::Close(_) | RegionRequest::Drop(_) => RegionChange::Deregisters, RegionRequest::Put(_) @@ -514,7 +521,7 @@ impl RegionServerInner { region_change: &RegionChange, ) { match region_change { - RegionChange::Register(_, _) => { + RegionChange::Register(_) => { self.region_map.insert( region_id, RegionEngineWithStatus::Registering(engine.clone()), @@ -533,7 +540,7 @@ impl RegionServerInner { fn unset_region_status(&self, region_id: RegionId, region_change: RegionChange) { match region_change { RegionChange::None => {} - RegionChange::Register(_, _) | RegionChange::Deregisters => { + RegionChange::Register(_) | RegionChange::Deregisters => { self.region_map.remove(&region_id); } } @@ -548,15 +555,28 @@ impl RegionServerInner { let engine_type = engine.name(); match region_change { RegionChange::None => {} - RegionChange::Register(_, is_opening_physical_region) => { - if is_opening_physical_region { - self.register_logical_regions(&engine, region_id).await?; - } - - info!("Region {region_id} is registered to engine {engine_type}"); + RegionChange::Register(attribute) => { + info!( + "Region {region_id} is registered to engine {}", + attribute.engine() + ); self.region_map - .insert(region_id, RegionEngineWithStatus::Ready(engine)); - self.event_listener.on_region_registered(region_id); + .insert(region_id, RegionEngineWithStatus::Ready(engine.clone())); + + match attribute { + RegionAttribute::Metric { physical } => { + if physical { + // Registers the logical regions belong to the physical region (`region_id`). + self.register_logical_regions(&engine, region_id).await?; + // We only send the `on_region_registered` event of the physical region. + self.event_listener.on_region_registered(region_id); + } + } + RegionAttribute::Mito => self.event_listener.on_region_registered(region_id), + RegionAttribute::File => { + // do nothing + } + } } RegionChange::Deregisters => { info!("Region {region_id} is deregistered from engine {engine_type}"); @@ -699,10 +719,45 @@ impl RegionServerInner { enum RegionChange { None, - Register(String, bool), + Register(RegionAttribute), Deregisters, } +fn parse_region_attribute( + engine: &str, + options: &HashMap<String, String>, +) -> Result<RegionAttribute> { + match engine { + MITO_ENGINE_NAME => Ok(RegionAttribute::Mito), + METRIC_ENGINE_NAME => { + let physical = !options.contains_key(LOGICAL_TABLE_METADATA_KEY); + + Ok(RegionAttribute::Metric { physical }) + } + FILE_ENGINE_NAME => Ok(RegionAttribute::File), + _ => error::UnexpectedSnafu { + violated: format!("Unknown engine: {}", engine), + } + .fail(), + } +} + +enum RegionAttribute { + Mito, + Metric { physical: bool }, + File, +} + +impl RegionAttribute { + fn engine(&self) -> &'static str { + match self { + RegionAttribute::Mito => MITO_ENGINE_NAME, + RegionAttribute::Metric { .. 
} => METRIC_ENGINE_NAME, + RegionAttribute::File => FILE_ENGINE_NAME, + } + } +} + #[cfg(test)] mod tests { @@ -723,7 +778,7 @@ mod tests { common_telemetry::init_default_ut_logging(); let mut mock_region_server = mock_region_server(); - let (engine, _receiver) = MockRegionEngine::new(); + let (engine, _receiver) = MockRegionEngine::new(MITO_ENGINE_NAME); let engine_name = engine.name(); mock_region_server.register_engine(engine.clone()); @@ -781,7 +836,7 @@ mod tests { common_telemetry::init_default_ut_logging(); let mut mock_region_server = mock_region_server(); - let (engine, _receiver) = MockRegionEngine::new(); + let (engine, _receiver) = MockRegionEngine::new(MITO_ENGINE_NAME); mock_region_server.register_engine(engine.clone()); @@ -832,7 +887,7 @@ mod tests { common_telemetry::init_default_ut_logging(); let mut mock_region_server = mock_region_server(); - let (engine, _receiver) = MockRegionEngine::new(); + let (engine, _receiver) = MockRegionEngine::new(MITO_ENGINE_NAME); mock_region_server.register_engine(engine.clone()); @@ -857,13 +912,15 @@ mod tests { common_telemetry::init_default_ut_logging(); let mut mock_region_server = mock_region_server(); - let (engine, _receiver) = - MockRegionEngine::with_mock_fn(Box::new(|_region_id, _request| { + let (engine, _receiver) = MockRegionEngine::with_mock_fn( + MITO_ENGINE_NAME, + Box::new(|_region_id, _request| { error::UnexpectedSnafu { violated: "test".to_string(), } .fail() - })); + }), + ); mock_region_server.register_engine(engine.clone()); @@ -904,7 +961,7 @@ mod tests { common_telemetry::init_default_ut_logging(); let mut mock_region_server = mock_region_server(); - let (engine, _) = MockRegionEngine::new(); + let (engine, _) = MockRegionEngine::new(MITO_ENGINE_NAME); mock_region_server.register_engine(engine.clone()); let region_id = RegionId::new(1024, 1); @@ -950,7 +1007,7 @@ mod tests { CurrentEngineTest { region_id, current_region_status: None, - region_change: RegionChange::Register(engine.name().to_string(), false), + region_change: RegionChange::Register(RegionAttribute::Mito), assert: Box::new(|result| { let current_engine = result.unwrap(); assert_matches!(current_engine, CurrentEngine::Engine(_)); @@ -959,7 +1016,7 @@ mod tests { CurrentEngineTest { region_id, current_region_status: Some(RegionEngineWithStatus::Registering(engine.clone())), - region_change: RegionChange::Register(engine.name().to_string(), false), + region_change: RegionChange::Register(RegionAttribute::Mito), assert: Box::new(|result| { let current_engine = result.unwrap(); assert_matches!(current_engine, CurrentEngine::EarlyReturn(_)); @@ -968,7 +1025,7 @@ mod tests { CurrentEngineTest { region_id, current_region_status: Some(RegionEngineWithStatus::Deregistering(engine.clone())), - region_change: RegionChange::Register(engine.name().to_string(), false), + region_change: RegionChange::Register(RegionAttribute::Mito), assert: Box::new(|result| { let err = result.unwrap_err(); assert_eq!(err.status_code(), StatusCode::RegionBusy); @@ -977,7 +1034,7 @@ mod tests { CurrentEngineTest { region_id, current_region_status: Some(RegionEngineWithStatus::Ready(engine.clone())), - region_change: RegionChange::Register(engine.name().to_string(), false), + region_change: RegionChange::Register(RegionAttribute::Mito), assert: Box::new(|result| { let current_engine = result.unwrap(); assert_matches!(current_engine, CurrentEngine::Engine(_)); diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs index 84244a4679fa..04af03ec857d 100644 --- 
a/src/datanode/src/tests.rs +++ b/src/datanode/src/tests.rs @@ -106,10 +106,11 @@ pub struct MockRegionEngine { pub(crate) handle_request_delay: Option<Duration>, pub(crate) handle_request_mock_fn: Option<MockRequestHandler>, pub(crate) mock_role: Option<Option<RegionRole>>, + engine: String, } impl MockRegionEngine { - pub fn new() -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>) { + pub fn new(engine: &str) -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>) { let (tx, rx) = tokio::sync::mpsc::channel(8); ( @@ -118,12 +119,14 @@ impl MockRegionEngine { sender: tx, handle_request_mock_fn: None, mock_role: None, + engine: engine.to_string(), }), rx, ) } pub fn with_mock_fn( + engine: &str, mock_fn: MockRequestHandler, ) -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>) { let (tx, rx) = tokio::sync::mpsc::channel(8); @@ -134,12 +137,16 @@ impl MockRegionEngine { sender: tx, handle_request_mock_fn: Some(mock_fn), mock_role: None, + engine: engine.to_string(), }), rx, ) } - pub fn with_custom_apply_fn<F>(apply: F) -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>) + pub fn with_custom_apply_fn<F>( + engine: &str, + apply: F, + ) -> (Arc<Self>, Receiver<(RegionId, RegionRequest)>) where F: FnOnce(&mut MockRegionEngine), { @@ -149,6 +156,7 @@ impl MockRegionEngine { sender: tx, handle_request_mock_fn: None, mock_role: None, + engine: engine.to_string(), }; apply(&mut region_engine); @@ -160,7 +168,7 @@ impl MockRegionEngine { #[async_trait::async_trait] impl RegionEngine for MockRegionEngine { fn name(&self) -> &str { - "mock" + &self.engine } async fn handle_request( diff --git a/src/store-api/src/metric_engine_consts.rs b/src/store-api/src/metric_engine_consts.rs index 16666167e457..0cb0e42237ee 100644 --- a/src/store-api/src/metric_engine_consts.rs +++ b/src/store-api/src/metric_engine_consts.rs @@ -39,6 +39,8 @@ pub const DATA_REGION_SUBDIR: &str = "data"; pub const METRIC_ENGINE_NAME: &str = "metric"; +pub const FILE_ENGINE_NAME: &str = "file"; + /// Metadata key present in the `CREATE TABLE ... WITH ()` clause. This key is /// used to identify the table is a physical metric table. E.g.: /// ```sql
fix
prevent registering logical regions with AliveKeeper (#3965)
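The core of the fix above is `parse_region_attribute`, which classifies a region by engine name and options so that only mito regions and *physical* metric regions are reported to the AliveKeeper. The standalone sketch below mirrors that logic with simplified types; the `MITO_ENGINE_NAME` and `LOGICAL_TABLE_METADATA_KEY` values and the `String` error type are placeholders, not the real definitions from `mito2` and `store_api`.

```Rust
use std::collections::HashMap;

// Simplified constants; only "metric" and "file" are taken from the diff,
// the other two values are stand-ins.
const MITO_ENGINE_NAME: &str = "mito";
const METRIC_ENGINE_NAME: &str = "metric";
const FILE_ENGINE_NAME: &str = "file";
const LOGICAL_TABLE_METADATA_KEY: &str = "logical_table_key";

#[derive(Debug, PartialEq)]
enum RegionAttribute {
    Mito,
    Metric { physical: bool },
    File,
}

fn parse_region_attribute(
    engine: &str,
    options: &HashMap<String, String>,
) -> Result<RegionAttribute, String> {
    match engine {
        MITO_ENGINE_NAME => Ok(RegionAttribute::Mito),
        // A metric region is "physical" unless the logical-table key is present.
        METRIC_ENGINE_NAME => Ok(RegionAttribute::Metric {
            physical: !options.contains_key(LOGICAL_TABLE_METADATA_KEY),
        }),
        FILE_ENGINE_NAME => Ok(RegionAttribute::File),
        other => Err(format!("Unknown engine: {other}")),
    }
}

fn main() {
    // A logical metric region carries the logical-table key, so it is
    // classified as non-physical and never registered with the AliveKeeper.
    let logical = HashMap::from([(LOGICAL_TABLE_METADATA_KEY.to_string(), "1".to_string())]);
    assert_eq!(
        parse_region_attribute(METRIC_ENGINE_NAME, &logical).unwrap(),
        RegionAttribute::Metric { physical: false }
    );
    assert_eq!(
        parse_region_attribute(MITO_ENGINE_NAME, &HashMap::new()).unwrap(),
        RegionAttribute::Mito
    );
}
```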
bdd3d2d9ce2cb6444f7e0385770c6f45218fdf44
2024-07-26 09:06:20
Lei, HUANG
chore: add dynamic cache size adjustment for InvertedIndexConfig (#4433)
false
diff --git a/config/config.md b/config/config.md index 14f22f5b6c9a..7f48597737d3 100644 --- a/config/config.md +++ b/config/config.md @@ -129,6 +129,8 @@ | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | +| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. | +| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | diff --git a/config/standalone.example.toml b/config/standalone.example.toml index 0944d5985e80..7d40927703fe 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -459,6 +459,12 @@ mem_threshold_on_create = "auto" ## Deprecated, use `region_engine.mito.index.aux_path` instead. intermediate_path = "" +## Cache size for inverted index metadata. +metadata_cache_size = "64MiB" + +## Cache size for inverted index content. +content_cache_size = "128MiB" + ## The options for full-text index in Mito engine. [region_engine.mito.fulltext_index] diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs index 012c31aaad00..7fd87d774f6e 100644 --- a/src/mito2/src/config.rs +++ b/src/mito2/src/config.rs @@ -39,6 +39,8 @@ const DEFAULT_SCAN_CHANNEL_SIZE: usize = 32; const GLOBAL_WRITE_BUFFER_SIZE_FACTOR: u64 = 8; /// Use `1/SST_META_CACHE_SIZE_FACTOR` of OS memory size as SST meta cache size in default mode const SST_META_CACHE_SIZE_FACTOR: u64 = 32; +/// Use `1/INDEX_CONTENT_CACHE_SIZE_FACTOR` of OS memory size for inverted index file content cache by default. +const INDEX_CONTENT_CACHE_SIZE_FACTOR: u64 = 32; /// Use `1/MEM_CACHE_SIZE_FACTOR` of OS memory size as mem cache size in default mode const MEM_CACHE_SIZE_FACTOR: u64 = 16; /// Use `1/INDEX_CREATE_MEM_THRESHOLD_FACTOR` of OS memory size as mem threshold for creating index @@ -389,19 +391,35 @@ pub struct InvertedIndexConfig { pub content_cache_size: ReadableSize, } +impl InvertedIndexConfig { + /// Adjusts the cache size of [InvertedIndexConfig] according to system memory size. 
+ fn adjust_cache_size(&mut self, sys_memory: ReadableSize) { + let content_cache_size = cmp::min( + sys_memory / INDEX_CONTENT_CACHE_SIZE_FACTOR, + ReadableSize::mb(128), + ); + self.content_cache_size = content_cache_size; + } +} + impl Default for InvertedIndexConfig { #[allow(deprecated)] fn default() -> Self { - Self { + let mut index_config = Self { create_on_flush: Mode::Auto, create_on_compaction: Mode::Auto, apply_on_query: Mode::Auto, mem_threshold_on_create: MemoryThreshold::Auto, write_buffer_size: ReadableSize::mb(8), intermediate_path: String::new(), - metadata_cache_size: ReadableSize::mb(32), - content_cache_size: ReadableSize::mb(32), + metadata_cache_size: ReadableSize::mb(64), + content_cache_size: ReadableSize::mb(128), + }; + + if let Some(sys_memory) = common_config::utils::get_sys_total_memory() { + index_config.adjust_cache_size(sys_memory); } + index_config } } diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index 75939b7c9485..38c87d865dd4 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -839,8 +839,6 @@ create_on_flush = "auto" create_on_compaction = "auto" apply_on_query = "auto" mem_threshold_on_create = "auto" -metadata_cache_size = "32MiB" -content_cache_size = "32MiB" [region_engine.mito.fulltext_index] create_on_flush = "auto" @@ -889,6 +887,8 @@ fn drop_lines_with_inconsistent_results(input: String) -> String { "vector_cache_size =", "page_cache_size =", "selector_result_cache_size =", + "metadata_cache_size =", + "content_cache_size =", ]; input
chore
add dynamic cache size adjustment for InvertedIndexConfig (#4433)
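The new defaults above are derived from system memory: the inverted index content cache becomes `min(sys_memory / 32, 128MiB)`. The self-contained sketch below reproduces that arithmetic with a simplified `ReadableSize` stand-in (the real type lives in `common_base::readable_size`), just to show the resulting values for a couple of memory sizes.

```Rust
// Minimal sketch of the adjustment rule from the diff, under the assumption
// that sizes are plain byte counts.
#[derive(Clone, Copy, Debug, PartialEq)]
struct ReadableSize(u64); // bytes

impl ReadableSize {
    const fn mb(n: u64) -> Self {
        ReadableSize(n * 1024 * 1024)
    }
}

const INDEX_CONTENT_CACHE_SIZE_FACTOR: u64 = 32;

fn adjusted_content_cache_size(sys_memory: ReadableSize) -> ReadableSize {
    let candidate = ReadableSize(sys_memory.0 / INDEX_CONTENT_CACHE_SIZE_FACTOR);
    // Cap the cache at 128 MiB, mirroring `cmp::min(sys_memory / FACTOR, 128MiB)`.
    if candidate.0 < ReadableSize::mb(128).0 {
        candidate
    } else {
        ReadableSize::mb(128)
    }
}

fn main() {
    // 2 GiB of RAM -> 64 MiB cache; 16 GiB of RAM -> capped at 128 MiB.
    assert_eq!(adjusted_content_cache_size(ReadableSize::mb(2048)), ReadableSize::mb(64));
    assert_eq!(adjusted_content_cache_size(ReadableSize::mb(16 * 1024)), ReadableSize::mb(128));
}
```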
f3509fa312e04eb113cf816f9c0b2e32688e40c7
2024-11-05 08:25:11
jeremyhi
chore: minor refactor for weighted choose (#4917)
false
diff --git a/src/meta-srv/src/selector/common.rs b/src/meta-srv/src/selector/common.rs index 11e5b3741a68..85abcdfca24a 100644 --- a/src/meta-srv/src/selector/common.rs +++ b/src/meta-srv/src/selector/common.rs @@ -22,7 +22,7 @@ use crate::metasrv::SelectTarget; use crate::selector::SelectorOptions; /// According to the `opts`, choose peers from the `weight_array` through `weighted_choose`. -pub fn choose_peers<W>(opts: &SelectorOptions, weighted_choose: &mut W) -> Result<Vec<Peer>> +pub fn choose_items<W>(opts: &SelectorOptions, weighted_choose: &mut W) -> Result<Vec<Peer>> where W: WeightedChoose<Peer>, { @@ -36,20 +36,36 @@ where } ); + if min_required_items == 1 { + // fast path + return Ok(vec![weighted_choose.choose_one()?]); + } + + let available_count = weighted_choose.len(); + if opts.allow_duplication { - (0..min_required_items) - .map(|_| weighted_choose.choose_one()) - .collect::<Result<_>>() - } else { - let weight_array_len = weighted_choose.len(); + // Calculate how many complete rounds of `available_count` items to select, + // plus any additional items needed after complete rounds. + let complete_batches = min_required_items / available_count; + let leftover_items = min_required_items % available_count; + if complete_batches == 0 { + return weighted_choose.choose_multiple(leftover_items); + } + + let mut result = Vec::with_capacity(min_required_items); + for _ in 0..complete_batches { + result.extend(weighted_choose.choose_multiple(available_count)?); + } + result.extend(weighted_choose.choose_multiple(leftover_items)?); - // When opts.allow_duplication is false, we need to check that the length of the weighted array is greater than - // or equal to min_required_items, otherwise it may cause an infinite loop. + Ok(result) + } else { + // Ensure the available items are sufficient when duplication is not allowed. 
ensure!( - weight_array_len >= min_required_items, + available_count >= min_required_items, error::NoEnoughAvailableNodeSnafu { required: min_required_items, - available: weight_array_len, + available: available_count, select_target: SelectTarget::Datanode } ); @@ -64,7 +80,7 @@ mod tests { use common_meta::peer::Peer; - use crate::selector::common::choose_peers; + use crate::selector::common::choose_items; use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem}; use crate::selector::SelectorOptions; @@ -115,7 +131,7 @@ mod tests { }; let selected_peers: HashSet<_> = - choose_peers(&opts, &mut RandomWeightedChoose::new(weight_array.clone())) + choose_items(&opts, &mut RandomWeightedChoose::new(weight_array.clone())) .unwrap() .into_iter() .collect(); @@ -129,7 +145,7 @@ mod tests { }; let selected_result = - choose_peers(&opts, &mut RandomWeightedChoose::new(weight_array.clone())); + choose_items(&opts, &mut RandomWeightedChoose::new(weight_array.clone())); assert!(selected_result.is_err()); for i in 1..=50 { @@ -139,7 +155,7 @@ mod tests { }; let selected_peers = - choose_peers(&opts, &mut RandomWeightedChoose::new(weight_array.clone())).unwrap(); + choose_items(&opts, &mut RandomWeightedChoose::new(weight_array.clone())).unwrap(); assert_eq!(i, selected_peers.len()); } diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs index 3ab99eb31e6b..d9af63da6555 100644 --- a/src/meta-srv/src/selector/lease_based.rs +++ b/src/meta-srv/src/selector/lease_based.rs @@ -17,7 +17,7 @@ use common_meta::peer::Peer; use crate::error::Result; use crate::lease; use crate::metasrv::SelectorContext; -use crate::selector::common::choose_peers; +use crate::selector::common::choose_items; use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem}; use crate::selector::{Namespace, Selector, SelectorOptions}; @@ -53,7 +53,7 @@ impl Selector for LeaseBasedSelector { // 3. choose peers by weight_array. let mut weighted_choose = RandomWeightedChoose::new(weight_array); - let selected = choose_peers(&opts, &mut weighted_choose)?; + let selected = choose_items(&opts, &mut weighted_choose)?; Ok(selected) } diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs index f52d6f9fc38e..8a00c7fdb7bd 100644 --- a/src/meta-srv/src/selector/load_based.rs +++ b/src/meta-srv/src/selector/load_based.rs @@ -26,7 +26,7 @@ use crate::error::{self, Result}; use crate::key::{DatanodeLeaseKey, LeaseValue}; use crate::lease; use crate::metasrv::SelectorContext; -use crate::selector::common::choose_peers; +use crate::selector::common::choose_items; use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute}; use crate::selector::weighted_choose::RandomWeightedChoose; use crate::selector::{Namespace, Selector, SelectorOptions}; @@ -94,7 +94,7 @@ where // 5. choose peers by weight_array. let mut weighted_choose = RandomWeightedChoose::new(weight_array); - let selected = choose_peers(&opts, &mut weighted_choose)?; + let selected = choose_items(&opts, &mut weighted_choose)?; debug!( "LoadBasedSelector select peers: {:?}, namespace: {}, opts: {:?}.",
chore
minor refactor for weighted choose (#4917)
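The refactored `choose_items` above handles the `allow_duplication` case by splitting the request into complete rounds over the available peers plus a remainder, instead of calling `choose_one` once per required item. The sketch below isolates that batching arithmetic; the weighted sampling itself and the `WeightedChoose` trait are deliberately omitted.

```Rust
// Split `min_required` picks into full rounds over `available` items plus a
// leftover partial round, as the duplication-allowed branch of `choose_items`
// does. Assumes `available > 0`, which the real code guarantees by checking
// the weight array beforehand.
fn split_rounds(min_required: usize, available: usize) -> (usize, usize) {
    debug_assert!(available > 0);
    (min_required / available, min_required % available)
}

fn main() {
    // 7 peers requested from 3 available: two full rounds of 3, then 1 extra pick.
    assert_eq!(split_rounds(7, 3), (2, 1));
    // Fewer requested than available: no full round, just one partial pick of 2.
    assert_eq!(split_rounds(2, 5), (0, 2));
}
```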
2b064265bf8c7e9b81655b0e5c05493a411dba4a
2022-07-25 08:05:36
LFC
feat: UDAF made generically (#91)
false
diff --git a/Cargo.lock b/Cargo.lock index 73f7915de63e..bc5e2c84c622 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -621,10 +621,12 @@ dependencies = [ name = "common-function" version = "0.1.0" dependencies = [ + "arc-swap", "arrow2", "chrono-tz", "common-error", "common-query", + "datafusion-common", "datatypes", "num", "num-traits", @@ -644,6 +646,7 @@ dependencies = [ "datafusion-expr", "datatypes", "snafu", + "tokio", ] [[package]] @@ -1064,6 +1067,8 @@ dependencies = [ "common-error", "datafusion-common", "enum_dispatch", + "num", + "num-traits", "ordered-float 3.0.0", "paste", "serde", @@ -2696,6 +2701,7 @@ dependencies = [ name = "query" version = "0.1.0" dependencies = [ + "arc-swap", "arrow2", "async-trait", "common-error", @@ -2709,6 +2715,9 @@ dependencies = [ "futures", "futures-util", "metrics", + "num", + "num-traits", + "rand 0.8.5", "snafu", "sql", "table", diff --git a/docs/how-to/how-to-write-aggregate-function.md b/docs/how-to/how-to-write-aggregate-function.md new file mode 100644 index 000000000000..94b5200f03aa --- /dev/null +++ b/docs/how-to/how-to-write-aggregate-function.md @@ -0,0 +1,68 @@ +Currently, our query engine is based on DataFusion, so all aggregate function is executed by DataFusion, through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered in a struct called "Accumulator" to accumulates states along the way in aggregation. + +However, DataFusion's UDAF implementation has a huge restriction, that it requires user to provide a concrete "Accumulator". Take `Median` aggregate function for example, to aggregate a `u32` datatype column, you have to write a `MedianU32`, and use `SELECT MEDIANU32(x)` in SQL. `MedianU32` cannot be used to aggregate a `i32` datatype column. Or, there's another way: you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`), and `match` all the way up to do aggregate calculations. It might work, though rather tedious. (But I think it's DataFusion's prefer way to write UDAF.) + +So is there a way we can make an aggregate function that automatically match the input data's type? For example, a `Median` aggregator that can work on both `u32` column and `i32`? The answer is yes until we found a way to bypassing DataFusion's restriction, a restriction that DataFusion simply don't pass the input data's type when creating an Accumulator. + +> There's an example in `my_sum_udaf_example.rs`, take that as quick start. + +# 1. Impl `AggregateFunctionCreator` trait for your accumulator creator. + +You must first define a struct that can store the input data's type. For example, + +```Rust +struct MySumAccumulatorCreator { + input_types: ArcSwapOption<Vec<ConcreteDataType>>, +} +``` + +Then impl `AggregateFunctionCreator` trait on it. The definition of the trait is: + +```Rust +pub trait AggregateFunctionCreator: Send + Sync + Debug { + fn creator(&self) -> AccumulatorCreatorFunction; + fn input_types(&self) -> Vec<ConcreteDataType>; + fn set_input_types(&self, input_types: Vec<ConcreteDataType>); + fn output_type(&self) -> ConcreteDataType; + fn state_types(&self) -> Vec<ConcreteDataType>; +} +``` + +our query engine will call `set_input_types` the very first, so you can use input data's type in methods that return output type and state types. 
+ +The output type is the aggregate function's output data type. For example, the `SUM` aggregate function's output type is `u64` for a `u32` datatype column. The state types are the types of the accumulator's internal states. Take the `AVG` aggregate function on an `i32` column as an example: its state types are `i64` (for the sum) and `u64` (for the count). + +The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's types as arguments. With the input datatypes known, you can create the accumulator generically. + +# 2. Impl the `Accumulator` trait for your accumulator. + +The accumulator is where you store the aggregate calculation states and evaluate a result. You must impl the `Accumulator` trait for it. The trait's definition is: + +```Rust +pub trait Accumulator: Send + Sync + Debug { + fn state(&self) -> Result<Vec<Value>>; + fn update_batch(&mut self, values: &[VectorRef]) -> Result<()>; + fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()>; + fn evaluate(&self) -> Result<Value>; +} +``` + +DataFusion basically executes an aggregation like this: + +1. Partition all input data for the aggregation. Create an accumulator for each partition. +2. Call `update_batch` on each accumulator with its partition's data, to let you update your aggregate calculation. +3. Call `state` to get each accumulator's internal state, the intermediate calculation result. +4. Call `merge_batch` to merge all accumulators' internal states into one. +5. Execute `evaluate` on that accumulator to get the final calculation result. + +Once you know the meaning of each method, you can easily write your accumulator. You can refer to the `Median` accumulator or the `SUM` accumulator defined in `my_sum_udaf_example.rs` for more details. + +# 3. Register your aggregate function with our query engine. + +You can call the `register_aggregate_function` method of the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has two fields. The first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction. We strongly recommend using lowercase for your name. If you have to use an uppercase name, wrap your aggregate function's name in quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`". + +The second field is a function that creates the accumulator creator you defined in step 1 above. A creator that creates creators is a bit convoluted, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL query, preventing the stored input types from affecting each other. A good starting point for the key details is the `get_aggregate_meta` method of our `DfContextProviderAdapter` struct. + +# (Optional) 4. Make your aggregate function automatically registered. + +If you've written a great aggregate function and want to let everyone use it, you can make it automatically registered in our query engine at start time. It's quite simple: just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.
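To make step 3 (and the optional step 4) concrete, here is a minimal, hypothetical registration of the `MySumAccumulatorCreator` sketched above, assuming it lives in the `common-function` crate next to the aggregate module added in this commit; `register_my_sum` and the name "my_sum" are illustrative.

```Rust
use std::sync::Arc;

use crate::scalars::aggregate::AggregateFunctionMeta;
use crate::scalars::FunctionRegistry;

// Mirrors how "median" is registered in `AggregateFunctions::register`, so the
// function is picked up by `FUNCTION_REGISTRY` at start time and handed to
// every query engine built by `QueryEngineFactory`.
pub fn register_my_sum(registry: &FunctionRegistry) {
    registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new(
        // A lowercase name lets `SELECT MY_SUM(x)` resolve without quoting.
        "my_sum",
        // A fresh creator is built for each query, so stored input types
        // cannot leak from one query into another.
        Arc::new(|| Arc::new(MySumAccumulatorCreator::default())),
    )));
}
```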
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml index 3cccfe5b893d..283e672144aa 100644 --- a/src/common/function/Cargo.toml +++ b/src/common/function/Cargo.toml @@ -11,9 +11,11 @@ features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc [dependencies] +arc-swap = "1.0" chrono-tz = "0.6" common-error = { path = "../error" } common-query = { path = "../query" } +datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2" } datatypes = { path = "../../datatypes" } num = "0.4.0" num-traits = "0.2.14" diff --git a/src/common/function/src/scalars.rs b/src/common/function/src/scalars.rs index e171af88ca41..2773daec112f 100644 --- a/src/common/function/src/scalars.rs +++ b/src/common/function/src/scalars.rs @@ -1,3 +1,4 @@ +pub mod aggregate; pub mod expression; pub mod function; pub mod function_registry; @@ -7,5 +8,6 @@ pub mod numpy; pub(crate) mod test; pub mod udf; +pub use aggregate::MedianAccumulatorCreator; pub use function::{Function, FunctionRef}; pub use function_registry::{FunctionRegistry, FUNCTION_REGISTRY}; diff --git a/src/common/function/src/scalars/aggregate/median.rs b/src/common/function/src/scalars/aggregate/median.rs new file mode 100644 index 000000000000..5d4844f5230c --- /dev/null +++ b/src/common/function/src/scalars/aggregate/median.rs @@ -0,0 +1,300 @@ +use std::cmp::Reverse; +use std::collections::BinaryHeap; +use std::sync::Arc; + +use arc_swap::ArcSwapOption; +use common_query::error::{ + CreateAccumulatorSnafu, DowncastVectorSnafu, ExecuteFunctionSnafu, FromScalarValueSnafu, Result, +}; +use common_query::logical_plan::{Accumulator, AggregateFunctionCreator}; +use common_query::prelude::*; +use datafusion_common::DataFusionError; +use datatypes::prelude::*; +use datatypes::value::ListValue; +use datatypes::vectors::{ConstantVector, ListVector}; +use datatypes::with_match_ordered_primitive_type_id; +use num::NumCast; +use snafu::{OptionExt, ResultExt}; + +// This median calculation algorithm's details can be found at +// https://leetcode.cn/problems/find-median-from-data-stream/ +// +// Basically, it uses two heaps, a maximum heap and a minimum. The maximum heap stores numbers that +// are not greater than the median, and the minimum heap stores the greater. In a streaming of +// numbers, when a number is arrived, we adjust the heaps' tops, so that either one top is the +// median or both tops can be averaged to get the median. +// +// The time complexity to update the median is O(logn), O(1) to get the median; and the space +// complexity is O(n). (Ignore the costs for heap expansion.) +// +// From the point of algorithm, [quick select](https://en.wikipedia.org/wiki/Quickselect) might be +// better. But to use quick select here, we need a mutable self in the final calculation(`evaluate`) +// to swap stored numbers in the states vector. Though we can make our `evaluate` received +// `&mut self`, DataFusion calls our accumulator with `&self` (see `DfAccumulatorAdaptor`). That +// means we have to introduce some kinds of interior mutability, and the overhead is not neglectable. +// +// TODO(LFC): Use quick select to get median when we can modify DataFusion's code, and benchmark with two-heap algorithm. 
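As an aside, the two-heap technique described in the comment above can be tried out in isolation with just the standard library. The sketch below is hypothetical and independent of the `Median` type that follows; unlike that implementation, it averages the two middle values into an `f64` instead of casting back to the element type.

```Rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Invariants: `not_greater` (a max-heap) holds the values not greater than the
// median candidates, `greater` (a min-heap via `Reverse`) holds the rest, and
// `not_greater` is never smaller than `greater` nor larger by more than one.
fn push(not_greater: &mut BinaryHeap<i64>, greater: &mut BinaryHeap<Reverse<i64>>, value: i64) {
    if not_greater.is_empty() || value <= *not_greater.peek().unwrap() {
        not_greater.push(value);
        if not_greater.len() > greater.len() + 1 {
            greater.push(Reverse(not_greater.pop().unwrap()));
        }
    } else {
        greater.push(Reverse(value));
        if greater.len() > not_greater.len() {
            not_greater.push(greater.pop().unwrap().0);
        }
    }
}

fn median(not_greater: &BinaryHeap<i64>, greater: &BinaryHeap<Reverse<i64>>) -> Option<f64> {
    let low = *not_greater.peek()?;
    Some(if not_greater.len() > greater.len() {
        low as f64
    } else {
        // Both heaps have the same size: average their tops.
        (low as f64 + greater.peek().unwrap().0 as f64) / 2.0
    })
}

fn main() {
    let (mut lo, mut hi) = (BinaryHeap::new(), BinaryHeap::new());
    for v in [5, 1, 9, 3] {
        push(&mut lo, &mut hi, v);
    }
    // Sorted stream is 1 3 | 5 9, so the median is (3 + 5) / 2.
    assert_eq!(Some(4.0), median(&lo, &hi));
}
```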
+#[derive(Debug, Default)] +pub struct Median<T> +where + T: Primitive + Ord, +{ + greater: BinaryHeap<Reverse<T>>, + not_greater: BinaryHeap<T>, +} + +impl<T> Median<T> +where + T: Primitive + Ord, +{ + fn push(&mut self, value: T) { + if self.not_greater.is_empty() { + self.not_greater.push(value); + return; + } + // The `unwrap`s below are safe because there are `push`s before them. + if value <= *self.not_greater.peek().unwrap() { + self.not_greater.push(value); + if self.not_greater.len() > self.greater.len() + 1 { + self.greater.push(Reverse(self.not_greater.pop().unwrap())); + } + } else { + self.greater.push(Reverse(value)); + if self.greater.len() > self.not_greater.len() { + self.not_greater.push(self.greater.pop().unwrap().0); + } + } + } +} + +// UDAFs are built using the trait `Accumulator`, that offers DataFusion the necessary functions +// to use them. +impl<T> Accumulator for Median<T> +where + T: Primitive + Ord, + for<'a> T: Scalar<RefType<'a> = T>, +{ + // This function serializes our state to `ScalarValue`, which DataFusion uses to pass this + // state between execution stages. Note that this can be arbitrary data. + // + // The `ScalarValue`s returned here will be passed in as argument `states: &[VectorRef]` to + // `merge_batch` function. + fn state(&self) -> Result<Vec<Value>> { + let nums = self + .greater + .iter() + .map(|x| &x.0) + .chain(self.not_greater.iter()) + .map(|&n| n.into()) + .collect::<Vec<Value>>(); + Ok(vec![Value::List(ListValue::new( + Some(Box::new(nums)), + T::default().into().data_type(), + ))]) + } + + // DataFusion calls this function to update the accumulator's state for a batch of inputs rows. + // It is expected this function to update the accumulator's state. + fn update_batch(&mut self, values: &[VectorRef]) -> Result<()> { + if values.is_empty() { + return Ok(()); + }; + + // This is a unary accumulator, so only one column is provided. + let column = &values[0]; + let column: &<T as Scalar>::VectorType = if column.is_const() { + let column: &ConstantVector = unsafe { VectorHelper::static_cast(column) }; + unsafe { VectorHelper::static_cast(column.inner()) } + } else { + unsafe { VectorHelper::static_cast(column) } + }; + for v in column.iter_data().flatten() { + self.push(v); + } + Ok(()) + } + + // DataFusion executes accumulators in partitions. In some execution stage, DataFusion will + // merge states from other accumulators (returned by `state()` method). + fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()> { + if states.is_empty() { + return Ok(()); + }; + + // The states here are returned by the `state` method. Since we only returned a vector + // with one value in that method, `states[0]` is fine. + let states = &states[0]; + let states = states + .as_any() + .downcast_ref::<ListVector>() + .with_context(|| DowncastVectorSnafu { + err_msg: format!( + "expect ListVector, got vector type {}", + states.vector_type_name() + ), + })?; + for state in states.values_iter() { + let state = state.context(FromScalarValueSnafu)?; + // merging state is simply accumulate stored numbers from others', so just call update + self.update_batch(&[state])? + } + Ok(()) + } + + // DataFusion expects this function to return the final value of this aggregator. 
+ fn evaluate(&self) -> Result<Value> { + if self.not_greater.is_empty() { + assert!( + self.greater.is_empty(), + "not expected in two-heap median algorithm, there must be a bug when implementing it" + ); + return Ok(Value::Null); + } + + // unwrap is safe because we checked not_greater heap's len above + let not_greater = *self.not_greater.peek().unwrap(); + let median = if self.not_greater.len() > self.greater.len() { + not_greater.into() + } else { + // unwrap is safe because greater heap len >= not_greater heap len, which is > 0 + let greater = self.greater.peek().unwrap(); + + // the following three NumCast's `unwrap`s are safe because T is primitive + let not_greater_v: f64 = NumCast::from(not_greater).unwrap(); + let greater_v: f64 = NumCast::from(greater.0).unwrap(); + let median: T = NumCast::from((not_greater_v + greater_v) / 2.0).unwrap(); + median.into() + }; + Ok(median) + } +} + +#[derive(Debug, Default)] +pub struct MedianAccumulatorCreator { + input_types: ArcSwapOption<Vec<ConcreteDataType>>, +} + +impl AggregateFunctionCreator for MedianAccumulatorCreator { + fn creator(&self) -> AccumulatorCreatorFunction { + let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| { + let input_type = &types[0]; + with_match_ordered_primitive_type_id!( + input_type.logical_type_id(), + |$S| { + Ok(Box::new(Median::<$S>::default())) + }, + { + let err_msg = format!( + "\"MEDIAN\" aggregate function not support data type {:?}", + input_type.logical_type_id(), + ); + CreateAccumulatorSnafu { err_msg }.fail()? + } + ) + }); + creator + } + + fn input_types(&self) -> Result<Vec<ConcreteDataType>> { + let input_types = self.input_types.load(); + if input_types.is_none() { + return Err(datafusion_internal_error()).context(ExecuteFunctionSnafu)?; + } + Ok(input_types.as_ref().unwrap().as_ref().clone()) + } + + fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()> { + let old = self.input_types.swap(Some(Arc::new(input_types.clone()))); + if let Some(old) = old { + if old.len() != input_types.len() { + return Err(datafusion_internal_error()).context(ExecuteFunctionSnafu)?; + } + for (x, y) in old.iter().zip(input_types.iter()) { + if x != y { + return Err(datafusion_internal_error()).context(ExecuteFunctionSnafu)?; + } + } + } + Ok(()) + } + + fn output_type(&self) -> Result<ConcreteDataType> { + let input_types = self.input_types()?; + if input_types.len() != 1 { + return Err(datafusion_internal_error()).context(ExecuteFunctionSnafu)?; + } + // unwrap is safe because we have checked input_types len must equals 1 + Ok(input_types.into_iter().next().unwrap()) + } + + fn state_types(&self) -> Result<Vec<ConcreteDataType>> { + Ok(vec![ConcreteDataType::list_datatype(self.output_type()?)]) + } +} + +fn datafusion_internal_error() -> DataFusionError { + DataFusionError::Internal( + "Illegal input_types status, check if DataFusion has changed its UDAF execution logic." 
+ .to_string(), + ) +} + +#[cfg(test)] +mod test { + use datatypes::vectors::PrimitiveVector; + + use super::*; + #[test] + fn test_update_batch() { + // test update empty batch, expect not updating anything + let mut median = Median::<i32>::default(); + assert!(median.update_batch(&[]).is_ok()); + assert!(median.not_greater.is_empty()); + assert!(median.greater.is_empty()); + assert_eq!(Value::Null, median.evaluate().unwrap()); + + // test update one not-null value + let mut median = Median::<i32>::default(); + let v: Vec<VectorRef> = vec![Arc::new(PrimitiveVector::<i32>::from(vec![Some(42)]))]; + assert!(median.update_batch(&v).is_ok()); + assert_eq!(Value::Int32(42), median.evaluate().unwrap()); + + // test update one null value + let mut median = Median::<i32>::default(); + let v: Vec<VectorRef> = vec![Arc::new(PrimitiveVector::<i32>::from(vec![ + Option::<i32>::None, + ]))]; + assert!(median.update_batch(&v).is_ok()); + assert_eq!(Value::Null, median.evaluate().unwrap()); + + // test update no null-value batch + let mut median = Median::<i32>::default(); + let v: Vec<VectorRef> = vec![Arc::new(PrimitiveVector::<i32>::from(vec![ + Some(-1i32), + Some(1), + Some(2), + ]))]; + assert!(median.update_batch(&v).is_ok()); + assert_eq!(Value::Int32(1), median.evaluate().unwrap()); + + // test update null-value batch + let mut median = Median::<i32>::default(); + let v: Vec<VectorRef> = vec![Arc::new(PrimitiveVector::<i32>::from(vec![ + Some(-2i32), + None, + Some(3), + Some(4), + ]))]; + assert!(median.update_batch(&v).is_ok()); + assert_eq!(Value::Int32(3), median.evaluate().unwrap()); + + // test update with constant vector + let mut median = Median::<i32>::default(); + let v: Vec<VectorRef> = vec![Arc::new(ConstantVector::new( + Arc::new(PrimitiveVector::<i32>::from_vec(vec![4])), + 10, + ))]; + assert!(median.update_batch(&v).is_ok()); + assert_eq!(Value::Int32(4), median.evaluate().unwrap()); + } +} diff --git a/src/common/function/src/scalars/aggregate/mod.rs b/src/common/function/src/scalars/aggregate/mod.rs new file mode 100644 index 000000000000..02ea8821941f --- /dev/null +++ b/src/common/function/src/scalars/aggregate/mod.rs @@ -0,0 +1,50 @@ +mod median; + +use std::sync::Arc; + +use common_query::logical_plan::AggregateFunctionCreatorRef; +pub use median::MedianAccumulatorCreator; + +use crate::scalars::FunctionRegistry; + +/// A function creates `AggregateFunctionCreator`. +/// "Aggregator" *is* AggregatorFunction. Since the later one is long, we named an short alias for it. +/// The two names might be used interchangeably. +type AggregatorCreatorFunction = Arc<dyn Fn() -> AggregateFunctionCreatorRef + Send + Sync>; + +/// `AggregateFunctionMeta` dynamically creates AggregateFunctionCreator. 
+#[derive(Clone)] +pub struct AggregateFunctionMeta { + name: String, + creator: AggregatorCreatorFunction, +} + +pub type AggregateFunctionMetaRef = Arc<AggregateFunctionMeta>; + +impl AggregateFunctionMeta { + pub fn new(name: &str, creator: AggregatorCreatorFunction) -> Self { + Self { + name: name.to_string(), + creator, + } + } + + pub fn name(&self) -> String { + self.name.to_string() + } + + pub fn create(&self) -> AggregateFunctionCreatorRef { + (self.creator)() + } +} + +pub(crate) struct AggregateFunctions; + +impl AggregateFunctions { + pub fn register(registry: &FunctionRegistry) { + registry.register_aggregate_function(Arc::new(AggregateFunctionMeta::new( + "median", + Arc::new(|| Arc::new(MedianAccumulatorCreator::default())), + ))); + } +} diff --git a/src/common/function/src/scalars/function_registry.rs b/src/common/function/src/scalars/function_registry.rs index 7e9db97609ae..0de2935592f9 100644 --- a/src/common/function/src/scalars/function_registry.rs +++ b/src/common/function/src/scalars/function_registry.rs @@ -5,6 +5,7 @@ use std::sync::RwLock; use once_cell::sync::Lazy; +use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions}; use crate::scalars::function::FunctionRef; use crate::scalars::math::MathFunction; use crate::scalars::numpy::NumpyFunction; @@ -12,6 +13,7 @@ use crate::scalars::numpy::NumpyFunction; #[derive(Default)] pub struct FunctionRegistry { functions: RwLock<HashMap<String, FunctionRef>>, + aggregate_functions: RwLock<HashMap<String, AggregateFunctionMetaRef>>, } impl FunctionRegistry { @@ -22,6 +24,13 @@ impl FunctionRegistry { .insert(func.name().to_string(), func); } + pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) { + self.aggregate_functions + .write() + .unwrap() + .insert(func.name(), func); + } + pub fn get_function(&self, name: &str) -> Option<FunctionRef> { self.functions.read().unwrap().get(name).cloned() } @@ -29,6 +38,15 @@ impl FunctionRegistry { pub fn functions(&self) -> Vec<FunctionRef> { self.functions.read().unwrap().values().cloned().collect() } + + pub fn aggregate_functions(&self) -> Vec<AggregateFunctionMetaRef> { + self.aggregate_functions + .read() + .unwrap() + .values() + .cloned() + .collect() + } } pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| { @@ -37,6 +55,8 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| { MathFunction::register(&function_registry); NumpyFunction::register(&function_registry); + AggregateFunctions::register(&function_registry); + Arc::new(function_registry) }); diff --git a/src/common/query/Cargo.toml b/src/common/query/Cargo.toml index 99a5912c9337..7623c1a332b5 100644 --- a/src/common/query/Cargo.toml +++ b/src/common/query/Cargo.toml @@ -13,4 +13,7 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git" , branch = datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2"} datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2"} datatypes = { path = "../../datatypes"} -snafu = { version = "0.7", features = ["backtraces"] } \ No newline at end of file +snafu = { version = "0.7", features = ["backtraces"] } + +[dev-dependencies] +tokio = { version = "1.0", features = ["full"] } diff --git a/src/common/query/src/error.rs b/src/common/query/src/error.rs index b89b897c0f11..fc418546effe 100644 --- a/src/common/query/src/error.rs +++ b/src/common/query/src/error.rs @@ -28,6 +28,12 @@ pub enum InnerError { 
source: DataTypeError, data_type: ArrowDatatype, }, + + #[snafu(display("Failed to create accumulator: {}", err_msg))] + CreateAccumulator { err_msg: String }, + + #[snafu(display("Failed to downcast vector: {}", err_msg))] + DowncastVector { err_msg: String }, } pub type Result<T> = std::result::Result<T, Error>; @@ -35,7 +41,9 @@ pub type Result<T> = std::result::Result<T, Error>; impl ErrorExt for InnerError { fn status_code(&self) -> StatusCode { match self { - InnerError::ExecuteFunction { .. } => StatusCode::EngineExecuteQuery, + InnerError::ExecuteFunction { .. } + | InnerError::CreateAccumulator { .. } + | InnerError::DowncastVector { .. } => StatusCode::EngineExecuteQuery, InnerError::IntoVector { source, .. } => source.status_code(), InnerError::FromScalarValue { source } => source.status_code(), } diff --git a/src/common/query/src/function.rs b/src/common/query/src/function.rs index 5de28324be7f..7f123a3fdf01 100644 --- a/src/common/query/src/function.rs +++ b/src/common/query/src/function.rs @@ -1,10 +1,13 @@ use std::sync::Arc; -use datatypes::prelude::ConcreteDataType; +use arrow::datatypes::DataType as ArrowDataType; +use datafusion_expr::ReturnTypeFunction as DfReturnTypeFunction; +use datatypes::prelude::{ConcreteDataType, DataType}; use datatypes::vectors::VectorRef; use snafu::ResultExt; use crate::error::{ExecuteFunctionSnafu, Result}; +use crate::logical_plan::Accumulator; use crate::prelude::{ColumnarValue, ScalarValue}; /// Scalar function @@ -22,6 +25,13 @@ pub type ScalarFunctionImplementation = pub type ReturnTypeFunction = Arc<dyn Fn(&[ConcreteDataType]) -> Result<Arc<ConcreteDataType>> + Send + Sync>; +/// Accumulator creator that will be used by DataFusion +pub type AccumulatorFunctionImpl = Arc<dyn Fn() -> Result<Box<dyn Accumulator>> + Send + Sync>; + +/// Create Accumulator with the data type of input columns. +pub type AccumulatorCreatorFunction = + Arc<dyn Fn(&[ConcreteDataType]) -> Result<Box<dyn Accumulator>> + Sync + Send>; + /// This signature corresponds to which types an aggregator serializes /// its state, given its return datatype. pub type StateTypeFunction = @@ -69,6 +79,25 @@ where }) } +pub fn to_df_return_type(func: ReturnTypeFunction) -> DfReturnTypeFunction { + let df_func = move |data_types: &[ArrowDataType]| { + // DataFusion DataType -> ConcreteDataType + let concrete_data_types = data_types + .iter() + .map(ConcreteDataType::from_arrow_type) + .collect::<Vec<_>>(); + + // evaluate ConcreteDataType + let eval_result = (func)(&concrete_data_types); + + // ConcreteDataType -> DataFusion DataType + eval_result + .map(|t| Arc::new(t.as_arrow_type())) + .map_err(|e| e.into()) + }; + Arc::new(df_func) +} + #[cfg(test)] mod tests { use std::sync::Arc; diff --git a/src/common/query/src/logical_plan/accumulator.rs b/src/common/query/src/logical_plan/accumulator.rs new file mode 100644 index 000000000000..e7a0eeb307a8 --- /dev/null +++ b/src/common/query/src/logical_plan/accumulator.rs @@ -0,0 +1,125 @@ +//! Accumulator module contains the trait definition for aggregation function's accumulators. 
+ +use std::fmt::Debug; +use std::sync::Arc; + +use arrow::array::ArrayRef; +use datafusion_common::Result as DfResult; +use datafusion_expr::Accumulator as DfAccumulator; +use datatypes::prelude::*; +use datatypes::vectors::Helper as VectorHelper; +use datatypes::vectors::VectorRef; +use snafu::ResultExt; + +use crate::error::{Error, FromScalarValueSnafu, IntoVectorSnafu, Result}; +use crate::prelude::*; + +pub type AggregateFunctionCreatorRef = Arc<dyn AggregateFunctionCreator>; + +/// An accumulator represents a stateful object that lives throughout the evaluation of multiple rows and +/// generically accumulates values. +/// +/// An accumulator knows how to: +/// * update its state from inputs via `update_batch` +/// * convert its internal state to a vector of scalar values +/// * update its state from multiple accumulators' states via `merge_batch` +/// * compute the final value from its internal state via `evaluate` +/// +/// Modified from DataFusion. +pub trait Accumulator: Send + Sync + Debug { + /// Returns the state of the accumulator at the end of the accumulation. + // in the case of an average on which we track `sum` and `n`, this function should return a vector + // of two values, sum and n. + fn state(&self) -> Result<Vec<Value>>; + + /// updates the accumulator's state from a vector of arrays. + fn update_batch(&mut self, values: &[VectorRef]) -> Result<()>; + + /// updates the accumulator's state from a vector of states. + fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()>; + + /// returns its value based on its current state. + fn evaluate(&self) -> Result<Value>; +} + +/// An `AggregateFunctionCreator` dynamically creates `Accumulator`. +/// DataFusion does not provide the input data's types when creating Accumulator, we have to stores +/// it somewhere else ourself. So an `AggregateFunctionCreator` often has a companion struct, that +/// can store the input data types, and knows the output and states types of an Accumulator. +/// That's how we create the Accumulator generically. +pub trait AggregateFunctionCreator: Send + Sync + Debug { + /// Create a function that can create a new accumulator with some input data type. + fn creator(&self) -> AccumulatorCreatorFunction; + + /// Get the input data type of the Accumulator. + fn input_types(&self) -> Result<Vec<ConcreteDataType>>; + + /// Store the input data type that is provided by DataFusion at runtime. + fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> Result<()>; + + /// Get the Accumulator's output data type. + fn output_type(&self) -> Result<ConcreteDataType>; + + /// Get the Accumulator's state data types. + fn state_types(&self) -> Result<Vec<ConcreteDataType>>; +} + +pub fn make_accumulator_function( + creator: Arc<dyn AggregateFunctionCreator>, +) -> AccumulatorFunctionImpl { + Arc::new(move || { + let input_types = creator.input_types()?; + let creator = creator.creator(); + creator(&input_types) + }) +} + +pub fn make_return_function(creator: Arc<dyn AggregateFunctionCreator>) -> ReturnTypeFunction { + Arc::new(move |input_types| { + creator.set_input_types(input_types.to_vec())?; + + let output_type = creator.output_type()?; + Ok(Arc::new(output_type)) + }) +} + +pub fn make_state_function(creator: Arc<dyn AggregateFunctionCreator>) -> StateTypeFunction { + Arc::new(move |_| Ok(Arc::new(creator.state_types()?))) +} + +/// A wrapper newtype for our Accumulator to DataFusion's Accumulator, +/// so to make our Accumulator able to be executed by DataFusion query engine. 
+#[derive(Debug)] +pub struct DfAccumulatorAdaptor(pub Box<dyn Accumulator>); + +impl DfAccumulator for DfAccumulatorAdaptor { + fn state(&self) -> DfResult<Vec<ScalarValue>> { + let state = self.0.state()?; + Ok(state.into_iter().map(ScalarValue::from).collect()) + } + + fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> { + let vectors = VectorHelper::try_into_vectors(values) + .context(FromScalarValueSnafu) + .map_err(Error::from)?; + self.0.update_batch(&vectors).map_err(|e| e.into()) + } + + fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> { + let mut vectors = Vec::with_capacity(states.len()); + for array in states.iter() { + vectors.push( + VectorHelper::try_into_vector(array) + .context(IntoVectorSnafu { + data_type: array.data_type().clone(), + }) + .map_err(Error::from)?, + ); + } + self.0.merge_batch(&vectors).map_err(|e| e.into()) + } + + fn evaluate(&self) -> DfResult<ScalarValue> { + Ok(ScalarValue::from(self.0.evaluate()?)) + } +} diff --git a/src/common/query/src/logical_plan/mod.rs b/src/common/query/src/logical_plan/mod.rs index 81a44b327772..ffc0f5f317c4 100644 --- a/src/common/query/src/logical_plan/mod.rs +++ b/src/common/query/src/logical_plan/mod.rs @@ -1,13 +1,18 @@ +mod accumulator; mod expr; +mod udaf; mod udf; use std::sync::Arc; use datatypes::prelude::ConcreteDataType; +pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef}; pub use self::expr::Expr; +pub use self::udaf::AggregateFunction; pub use self::udf::ScalarUdf; use crate::function::{ReturnTypeFunction, ScalarFunctionImplementation}; +use crate::logical_plan::accumulator::*; use crate::signature::{Signature, Volatility}; /// Creates a new UDF with a specific signature and specific return type. @@ -31,6 +36,22 @@ pub fn create_udf( ) } +pub fn create_aggregate_function( + name: String, + creator: Arc<dyn AggregateFunctionCreator>, +) -> AggregateFunction { + let return_type = make_return_function(creator.clone()); + let accumulator = make_accumulator_function(creator.clone()); + let state_type = make_state_function(creator.clone()); + AggregateFunction::new( + name, + Signature::any(1, Volatility::Immutable), + return_type, + accumulator, + state_type, + ) +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -40,13 +61,13 @@ mod tests { use datafusion_expr::ColumnarValue as DfColumnarValue; use datafusion_expr::ScalarUDF as DfScalarUDF; use datafusion_expr::TypeSignature as DfTypeSignature; - use datatypes::prelude::ScalarVector; + use datatypes::prelude::*; use datatypes::vectors::BooleanVector; use datatypes::vectors::VectorRef; use super::*; use crate::error::Result; - use crate::function::make_scalar_function; + use crate::function::{make_scalar_function, AccumulatorCreatorFunction}; use crate::prelude::ScalarValue; use crate::signature::TypeSignature; @@ -129,4 +150,76 @@ mod tests { _ => unreachable!(), } } + + #[derive(Debug)] + struct DummyAccumulator; + + impl Accumulator for DummyAccumulator { + fn state(&self) -> Result<Vec<Value>> { + Ok(vec![]) + } + + fn update_batch(&mut self, _values: &[VectorRef]) -> Result<()> { + Ok(()) + } + + fn merge_batch(&mut self, _states: &[VectorRef]) -> Result<()> { + Ok(()) + } + + fn evaluate(&self) -> Result<Value> { + Ok(Value::Int32(0)) + } + } + + #[derive(Debug)] + struct DummyAccumulatorCreator; + + impl AggregateFunctionCreator for DummyAccumulatorCreator { + fn creator(&self) -> AccumulatorCreatorFunction { + Arc::new(|_| Ok(Box::new(DummyAccumulator))) + } + + fn input_types(&self) 
-> Result<Vec<ConcreteDataType>> { + Ok(vec![ConcreteDataType::float64_datatype()]) + } + + fn set_input_types(&self, _: Vec<ConcreteDataType>) -> Result<()> { + Ok(()) + } + + fn output_type(&self) -> Result<ConcreteDataType> { + Ok(self.input_types()?.into_iter().next().unwrap()) + } + + fn state_types(&self) -> Result<Vec<ConcreteDataType>> { + Ok(vec![ + ConcreteDataType::float64_datatype(), + ConcreteDataType::uint32_datatype(), + ]) + } + } + + #[test] + fn test_create_udaf() { + let creator = DummyAccumulatorCreator; + let udaf = create_aggregate_function("dummy".to_string(), Arc::new(creator)); + assert_eq!("dummy", udaf.name); + + let signature = udaf.signature; + assert_eq!(TypeSignature::Any(1), signature.type_signature); + assert_eq!(Volatility::Immutable, signature.volatility); + + assert_eq!( + Arc::new(ConcreteDataType::float64_datatype()), + (udaf.return_type)(&[ConcreteDataType::float64_datatype()]).unwrap() + ); + assert_eq!( + Arc::new(vec![ + ConcreteDataType::float64_datatype(), + ConcreteDataType::uint32_datatype(), + ]), + (udaf.state_type)(&ConcreteDataType::float64_datatype()).unwrap() + ); + } } diff --git a/src/common/query/src/logical_plan/udaf.rs b/src/common/query/src/logical_plan/udaf.rs new file mode 100644 index 000000000000..ff629a71a8c3 --- /dev/null +++ b/src/common/query/src/logical_plan/udaf.rs @@ -0,0 +1,104 @@ +//! Udaf module contains functions and structs supporting user-defined aggregate functions. +//! +//! Modified from DataFusion. + +use std::fmt::{self, Debug, Formatter}; +use std::sync::Arc; + +use arrow::datatypes::DataType as ArrowDataType; +use datafusion_expr::AccumulatorFunctionImplementation as DfAccumulatorFunctionImplementation; +use datafusion_expr::AggregateUDF as DfAggregateUdf; +use datafusion_expr::StateTypeFunction as DfStateTypeFunction; +use datatypes::prelude::*; + +use crate::function::{ + to_df_return_type, AccumulatorFunctionImpl, ReturnTypeFunction, StateTypeFunction, +}; +use crate::logical_plan::accumulator::DfAccumulatorAdaptor; +use crate::signature::Signature; + +/// Logical representation of a user-defined aggregate function (UDAF) +/// A UDAF is different from a UDF in that it is stateful across batches. 
+#[derive(Clone)] +pub struct AggregateFunction { + /// name + pub name: String, + /// signature + pub signature: Signature, + /// Return type + pub return_type: ReturnTypeFunction, + /// actual implementation + pub accumulator: AccumulatorFunctionImpl, + /// the accumulator's state's description as a function of the return type + pub state_type: StateTypeFunction, +} + +impl Debug for AggregateFunction { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + f.debug_struct("AggregateUDF") + .field("name", &self.name) + .field("signature", &self.signature) + .field("fun", &"<FUNC>") + .finish() + } +} + +impl PartialEq for AggregateFunction { + fn eq(&self, other: &Self) -> bool { + self.name == other.name && self.signature == other.signature + } +} + +impl AggregateFunction { + /// Create a new AggregateUDF + pub fn new( + name: String, + signature: Signature, + return_type: ReturnTypeFunction, + accumulator: AccumulatorFunctionImpl, + state_type: StateTypeFunction, + ) -> Self { + Self { + name, + signature, + return_type, + accumulator, + state_type, + } + } +} + +impl From<AggregateFunction> for DfAggregateUdf { + fn from(udaf: AggregateFunction) -> Self { + DfAggregateUdf::new( + &udaf.name, + &udaf.signature.into(), + &to_df_return_type(udaf.return_type), + &to_df_accumulator_func(udaf.accumulator), + &to_df_state_type(udaf.state_type), + ) + } +} + +fn to_df_accumulator_func(func: AccumulatorFunctionImpl) -> DfAccumulatorFunctionImplementation { + Arc::new(move || { + let acc = func()?; + Ok(Box::new(DfAccumulatorAdaptor(acc))) + }) +} + +fn to_df_state_type(func: StateTypeFunction) -> DfStateTypeFunction { + let df_func = move |data_type: &ArrowDataType| { + // DataFusion DataType -> ConcreteDataType + let concrete_data_type = ConcreteDataType::from_arrow_type(data_type); + + // evaluate ConcreteDataType + let eval_result = (func)(&concrete_data_type); + + // ConcreteDataType -> DataFusion DataType + eval_result + .map(|ts| Arc::new(ts.iter().map(|t| t.as_arrow_type()).collect())) + .map_err(|e| e.into()) + }; + Arc::new(df_func) +} diff --git a/src/common/query/src/logical_plan/udf.rs b/src/common/query/src/logical_plan/udf.rs index 3f5286f78579..9ba40f69a357 100644 --- a/src/common/query/src/logical_plan/udf.rs +++ b/src/common/query/src/logical_plan/udf.rs @@ -5,15 +5,14 @@ use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; -use arrow::datatypes::DataType as ArrowDataType; use datafusion_expr::{ - ColumnarValue as DfColumnarValue, ReturnTypeFunction as DfReturnTypeFunction, + ColumnarValue as DfColumnarValue, ScalarFunctionImplementation as DfScalarFunctionImplementation, ScalarUDF as DfScalarUDF, }; -use datatypes::prelude::{ConcreteDataType, DataType}; use crate::error::Result; use crate::function::{ReturnTypeFunction, ScalarFunctionImplementation}; +use crate::prelude::to_df_return_type; use crate::signature::Signature; /// Logical representation of a UDF. 
@@ -60,27 +59,12 @@ impl ScalarUdf { DfScalarUDF::new( &self.name, &self.signature.into(), - &to_df_returntype(self.return_type), + &to_df_return_type(self.return_type), &to_df_scalar_func(self.fun), ) } } -fn to_df_returntype(fun: ReturnTypeFunction) -> DfReturnTypeFunction { - Arc::new(move |data_types: &[ArrowDataType]| { - let concret_types = data_types - .iter() - .map(ConcreteDataType::from_arrow_type) - .collect::<Vec<ConcreteDataType>>(); - - let result = (fun)(&concret_types); - - result - .map(|t| Arc::new(t.as_arrow_type())) - .map_err(|e| e.into()) - }) -} - fn to_df_scalar_func(fun: ScalarFunctionImplementation) -> DfScalarFunctionImplementation { Arc::new(move |args: &[DfColumnarValue]| { let args: Result<Vec<_>> = args.iter().map(TryFrom::try_from).collect(); diff --git a/src/common/query/src/prelude.rs b/src/common/query/src/prelude.rs index c537b7cee498..709457e393e0 100644 --- a/src/common/query/src/prelude.rs +++ b/src/common/query/src/prelude.rs @@ -3,6 +3,7 @@ pub use datafusion_common::ScalarValue; pub use crate::columnar_value::ColumnarValue; pub use crate::function::*; pub use crate::logical_plan::create_udf; +pub use crate::logical_plan::AggregateFunction; pub use crate::logical_plan::Expr; pub use crate::logical_plan::ScalarUdf; pub use crate::signature::{Signature, TypeSignature, Volatility}; diff --git a/src/datatypes/Cargo.toml b/src/datatypes/Cargo.toml index 7b6ebddd9be2..b08d59c81550 100644 --- a/src/datatypes/Cargo.toml +++ b/src/datatypes/Cargo.toml @@ -15,6 +15,8 @@ datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , b enum_dispatch = "0.3" ordered-float = "3.0" paste = "1.0" +num = "0.4" +num-traits = "0.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0" snafu = { version = "0.7", features = ["backtraces"] } diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs index c814d7044841..249875e472d5 100644 --- a/src/datatypes/src/data_type.rs +++ b/src/datatypes/src/data_type.rs @@ -7,7 +7,7 @@ use crate::error::{self, Error, Result}; use crate::type_id::LogicalTypeId; use crate::types::{ BinaryType, BooleanType, Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, - NullType, StringType, UInt16Type, UInt32Type, UInt64Type, UInt8Type, + ListType, NullType, StringType, UInt16Type, UInt32Type, UInt64Type, UInt8Type, }; use crate::value::Value; @@ -32,6 +32,8 @@ pub enum ConcreteDataType { // String types Binary(BinaryType), String(StringType), + + List(ListType), } impl ConcreteDataType { @@ -113,6 +115,9 @@ impl TryFrom<&ArrowDataType> for ConcreteDataType { ArrowDataType::Float64 => Self::float64_datatype(), ArrowDataType::Binary | ArrowDataType::LargeBinary => Self::binary_datatype(), ArrowDataType::Utf8 | ArrowDataType::LargeUtf8 => Self::string_datatype(), + ArrowDataType::List(field) => Self::List(ListType::new( + ConcreteDataType::from_arrow_type(&field.data_type), + )), _ => { return error::UnsupportedArrowTypeSnafu { arrow_type: dt.clone(), @@ -144,6 +149,12 @@ impl_new_concrete_type_functions!( Binary, String ); +impl ConcreteDataType { + pub fn list_datatype(inner_type: ConcreteDataType) -> ConcreteDataType { + ConcreteDataType::List(ListType::new(inner_type)) + } +} + /// Data type abstraction. 
#[enum_dispatch::enum_dispatch] pub trait DataType: std::fmt::Debug + Send + Sync { @@ -164,6 +175,8 @@ pub type DataTypeRef = Arc<dyn DataType>; #[cfg(test)] mod tests { + use arrow::datatypes::Field; + use super::*; #[test] @@ -242,5 +255,13 @@ mod tests { ConcreteDataType::from_arrow_type(&ArrowDataType::LargeUtf8), ConcreteDataType::String(_) )); + assert_eq!( + ConcreteDataType::from_arrow_type(&ArrowDataType::List(Box::new(Field::new( + "item", + ArrowDataType::Int32, + true + )))), + ConcreteDataType::List(ListType::new(ConcreteDataType::int32_datatype())) + ); } } diff --git a/src/datatypes/src/macros.rs b/src/datatypes/src/macros.rs index 0f309b630a3c..d1b8d4db77d8 100644 --- a/src/datatypes/src/macros.rs +++ b/src/datatypes/src/macros.rs @@ -38,6 +38,23 @@ macro_rules! for_all_primitive_types{ }; } +#[macro_export] +macro_rules! for_all_ordered_primitive_types { + ($macro:tt $(, $x:tt)*) => { + $macro! { + [$($x),*], + { i8 }, + { i16 }, + { i32 }, + { i64 }, + { u8 }, + { u16 }, + { u32 }, + { u64 } + } + }; +} + #[macro_export] macro_rules! with_match_primitive_type_id { ($key_type:expr, | $_:tt $T:ident | $body:tt, $nbody:tt) => {{ @@ -63,3 +80,27 @@ macro_rules! with_match_primitive_type_id { } }}; } + +#[macro_export] +macro_rules! with_match_ordered_primitive_type_id { + ($key_type:expr, | $_:tt $T:ident | $body:tt, $nbody:tt) => {{ + macro_rules! __with_ty__ { + ( $_ $T:ident ) => { + $body + }; + } + + match $key_type { + LogicalTypeId::Int8 => __with_ty__! { i8 }, + LogicalTypeId::Int16 => __with_ty__! { i16 }, + LogicalTypeId::Int32 => __with_ty__! { i32 }, + LogicalTypeId::Int64 => __with_ty__! { i64 }, + LogicalTypeId::UInt8 => __with_ty__! { u8 }, + LogicalTypeId::UInt16 => __with_ty__! { u16 }, + LogicalTypeId::UInt32 => __with_ty__! { u32 }, + LogicalTypeId::UInt64 => __with_ty__! { u64 }, + + _ => $nbody, + } + }}; +} diff --git a/src/datatypes/src/prelude.rs b/src/datatypes/src/prelude.rs index 527ea967ef6d..88e1505fb9b3 100644 --- a/src/datatypes/src/prelude.rs +++ b/src/datatypes/src/prelude.rs @@ -2,6 +2,7 @@ pub use crate::data_type::{ConcreteDataType, DataType, DataTypeRef}; pub use crate::macros::*; pub use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder}; pub use crate::type_id::LogicalTypeId; +pub use crate::types::Primitive; pub use crate::value::Value; pub use crate::vectors::{ Helper as VectorHelper, MutableVector, Validity, Vector, VectorBuilder, VectorRef, diff --git a/src/datatypes/src/type_id.rs b/src/datatypes/src/type_id.rs index 629604c31f6d..4372afe1bbee 100644 --- a/src/datatypes/src/type_id.rs +++ b/src/datatypes/src/type_id.rs @@ -29,6 +29,8 @@ pub enum LogicalTypeId { /// Datetime representing the elapsed time since UNIX epoch (1970-01-01) in /// seconds/milliseconds/microseconds/nanoseconds, determined by precision. 
DateTime, + + List, } impl LogicalTypeId { @@ -50,7 +52,7 @@ impl LogicalTypeId { LogicalTypeId::Float64 => ConcreteDataType::float64_datatype(), LogicalTypeId::String => ConcreteDataType::string_datatype(), LogicalTypeId::Binary => ConcreteDataType::binary_datatype(), - LogicalTypeId::Date | LogicalTypeId::DateTime => { + LogicalTypeId::Date | LogicalTypeId::DateTime | LogicalTypeId::List => { unimplemented!("Data type for {:?} is unimplemented", self) } } diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs index 0255734dc369..efa32244233b 100644 --- a/src/datatypes/src/types.rs +++ b/src/datatypes/src/types.rs @@ -1,5 +1,6 @@ mod binary_type; mod boolean_type; +mod list_type; mod null_type; mod primitive_traits; mod primitive_type; @@ -7,6 +8,7 @@ mod string_type; pub use binary_type::BinaryType; pub use boolean_type::BooleanType; +pub use list_type::ListType; pub use null_type::NullType; pub use primitive_traits::Primitive; pub use primitive_type::{ diff --git a/src/datatypes/src/types/list_type.rs b/src/datatypes/src/types/list_type.rs new file mode 100644 index 000000000000..a93352a814b3 --- /dev/null +++ b/src/datatypes/src/types/list_type.rs @@ -0,0 +1,65 @@ +use arrow::datatypes::{DataType as ArrowDataType, Field}; + +use crate::prelude::*; +use crate::value::ListValue; + +/// Used to represent the List datatype. +#[derive(Debug, Clone, PartialEq)] +pub struct ListType { + /// The type of List's inner data. + inner: Box<ConcreteDataType>, +} + +impl Default for ListType { + fn default() -> Self { + ListType::new(ConcreteDataType::null_datatype()) + } +} + +impl ListType { + pub fn new(datatype: ConcreteDataType) -> Self { + ListType { + inner: Box::new(datatype), + } + } +} + +impl DataType for ListType { + fn name(&self) -> &str { + "List" + } + + fn logical_type_id(&self) -> LogicalTypeId { + LogicalTypeId::List + } + + fn default_value(&self) -> Value { + Value::List(ListValue::new(None, *self.inner.clone())) + } + + fn as_arrow_type(&self) -> ArrowDataType { + let field = Box::new(Field::new("item", self.inner.as_arrow_type(), true)); + ArrowDataType::List(field) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::value::ListValue; + + #[test] + fn test_list_type() { + let t = ListType::new(ConcreteDataType::boolean_datatype()); + assert_eq!("List", t.name()); + assert_eq!(LogicalTypeId::List, t.logical_type_id()); + assert_eq!( + Value::List(ListValue::new(None, ConcreteDataType::boolean_datatype())), + t.default_value() + ); + assert_eq!( + ArrowDataType::List(Box::new(Field::new("item", ArrowDataType::Boolean, true))), + t.as_arrow_type() + ); + } +} diff --git a/src/datatypes/src/types/primitive_traits.rs b/src/datatypes/src/types/primitive_traits.rs index 3394c5d9386e..cfd11f3d066f 100644 --- a/src/datatypes/src/types/primitive_traits.rs +++ b/src/datatypes/src/types/primitive_traits.rs @@ -1,10 +1,22 @@ +use arrow::compute::arithmetics::basic::NativeArithmetics; use arrow::types::NativeType; +use num::NumCast; +use crate::prelude::Scalar; use crate::value::Value; /// Primitive type. pub trait Primitive: - PartialOrd + Default + Clone + Copy + Into<Value> + NativeType + serde::Serialize + PartialOrd + + Default + + Clone + + Copy + + Into<Value> + + NativeType + + serde::Serialize + + NativeArithmetics + + NumCast + + Scalar { /// Largest numeric type this primitive type can be cast to. 
type LargestType: Primitive; diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs index 8fec708cf882..3066f7460a74 100644 --- a/src/datatypes/src/value.rs +++ b/src/datatypes/src/value.rs @@ -1,8 +1,11 @@ +use std::cmp::Ordering; + use common_base::bytes::{Bytes, StringBytes}; +use datafusion_common::ScalarValue; pub use ordered_float::OrderedFloat; use serde::{Serialize, Serializer}; -use crate::data_type::ConcreteDataType; +use crate::prelude::*; pub type OrderedF32 = OrderedFloat<f32>; pub type OrderedF64 = OrderedFloat<f64>; @@ -36,6 +39,8 @@ pub enum Value { // Date & Time types: Date(i32), DateTime(i64), + + List(ListValue), } impl Value { @@ -59,7 +64,7 @@ impl Value { Value::Float64(_) => ConcreteDataType::float64_datatype(), Value::String(_) => ConcreteDataType::string_datatype(), Value::Binary(_) => ConcreteDataType::binary_datatype(), - Value::Date(_) | Value::DateTime(_) => { + Value::Date(_) | Value::DateTime(_) | Value::List(_) => { unimplemented!("Unsupported data type of value {:?}", self) } } @@ -149,10 +154,73 @@ impl Serialize for Value { Value::Binary(bytes) => bytes.serialize(serializer), Value::Date(v) => v.serialize(serializer), Value::DateTime(v) => v.serialize(serializer), + Value::List(_) => unimplemented!(), + } + } +} + +impl From<Value> for ScalarValue { + fn from(value: Value) -> Self { + match value { + Value::Boolean(v) => ScalarValue::Boolean(Some(v)), + Value::UInt8(v) => ScalarValue::UInt8(Some(v)), + Value::UInt16(v) => ScalarValue::UInt16(Some(v)), + Value::UInt32(v) => ScalarValue::UInt32(Some(v)), + Value::UInt64(v) => ScalarValue::UInt64(Some(v)), + Value::Int8(v) => ScalarValue::Int8(Some(v)), + Value::Int16(v) => ScalarValue::Int16(Some(v)), + Value::Int32(v) => ScalarValue::Int32(Some(v)), + Value::Int64(v) => ScalarValue::Int64(Some(v)), + Value::Float32(v) => ScalarValue::Float32(Some(v.0)), + Value::Float64(v) => ScalarValue::Float64(Some(v.0)), + Value::String(v) => ScalarValue::LargeUtf8(Some(v.as_utf8().to_string())), + Value::Binary(v) => ScalarValue::LargeBinary(Some(v.to_vec())), + Value::Date(v) => ScalarValue::Date32(Some(v)), + Value::DateTime(v) => ScalarValue::Date64(Some(v)), + Value::Null => ScalarValue::Boolean(None), + Value::List(v) => ScalarValue::List( + v.items + .map(|vs| Box::new(vs.into_iter().map(ScalarValue::from).collect())), + Box::new(v.datatype.as_arrow_type()), + ), } } } +#[derive(Debug, Clone, PartialEq)] +pub struct ListValue { + /// List of nested Values (boxed to reduce size_of(Value)) + #[allow(clippy::box_collection)] + items: Option<Box<Vec<Value>>>, + /// Inner values datatype, to distinguish empty lists of different datatypes. + /// Restricted by DataFusion, cannot use null datatype for empty list. + datatype: ConcreteDataType, +} + +impl Eq for ListValue {} + +impl ListValue { + pub fn new(items: Option<Box<Vec<Value>>>, datatype: ConcreteDataType) -> Self { + Self { items, datatype } + } +} + +impl PartialOrd for ListValue { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for ListValue { + fn cmp(&self, other: &Self) -> Ordering { + assert_eq!( + self.datatype, other.datatype, + "Cannot compare different datatypes!" 
+ ); + self.items.cmp(&other.items) + } +} + #[cfg(test)] mod tests { use super::*; @@ -297,4 +365,76 @@ mod tests { let world: &[u8] = b"world"; assert_eq!(Value::Binary(Bytes::from(world)), Value::from(world)); } + + #[test] + fn test_value_into_scalar_value() { + assert_eq!( + ScalarValue::Boolean(Some(true)), + Value::Boolean(true).into() + ); + assert_eq!( + ScalarValue::Boolean(Some(true)), + Value::Boolean(true).into() + ); + + assert_eq!( + ScalarValue::UInt8(Some(u8::MIN + 1)), + Value::UInt8(u8::MIN + 1).into() + ); + assert_eq!( + ScalarValue::UInt16(Some(u16::MIN + 2)), + Value::UInt16(u16::MIN + 2).into() + ); + assert_eq!( + ScalarValue::UInt32(Some(u32::MIN + 3)), + Value::UInt32(u32::MIN + 3).into() + ); + assert_eq!( + ScalarValue::UInt64(Some(u64::MIN + 4)), + Value::UInt64(u64::MIN + 4).into() + ); + + assert_eq!( + ScalarValue::Int8(Some(i8::MIN + 4)), + Value::Int8(i8::MIN + 4).into() + ); + assert_eq!( + ScalarValue::Int16(Some(i16::MIN + 5)), + Value::Int16(i16::MIN + 5).into() + ); + assert_eq!( + ScalarValue::Int32(Some(i32::MIN + 6)), + Value::Int32(i32::MIN + 6).into() + ); + assert_eq!( + ScalarValue::Int64(Some(i64::MIN + 7)), + Value::Int64(i64::MIN + 7).into() + ); + + assert_eq!( + ScalarValue::Float32(Some(8.0f32)), + Value::Float32(OrderedFloat(8.0f32)).into() + ); + assert_eq!( + ScalarValue::Float64(Some(9.0f64)), + Value::Float64(OrderedFloat(9.0f64)).into() + ); + + assert_eq!( + ScalarValue::LargeUtf8(Some("hello".to_string())), + Value::String(StringBytes::from("hello")).into() + ); + assert_eq!( + ScalarValue::LargeBinary(Some("world".as_bytes().to_vec())), + Value::Binary(Bytes::from("world".as_bytes())).into() + ); + + assert_eq!(ScalarValue::Date32(Some(10i32)), Value::Date(10i32).into()); + assert_eq!( + ScalarValue::Date64(Some(20i64)), + Value::DateTime(20i64).into() + ); + + assert_eq!(ScalarValue::Boolean(None), Value::Null.into()); + } } diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs index dd79dc4f0fd7..29f7410b2ac2 100644 --- a/src/datatypes/src/vectors.rs +++ b/src/datatypes/src/vectors.rs @@ -3,6 +3,7 @@ pub mod boolean; mod builder; pub mod constant; mod helper; +mod list; pub mod mutable; pub mod null; pub mod primitive; @@ -18,6 +19,7 @@ pub use boolean::*; pub use builder::VectorBuilder; pub use constant::*; pub use helper::Helper; +pub use list::*; pub use mutable::MutableVector; pub use null::*; pub use primitive::*; diff --git a/src/datatypes/src/vectors/builder.rs b/src/datatypes/src/vectors/builder.rs index ce8318e6b1a2..a5911652a4da 100644 --- a/src/datatypes/src/vectors/builder.rs +++ b/src/datatypes/src/vectors/builder.rs @@ -78,6 +78,7 @@ impl VectorBuilder { ConcreteDataType::Binary(_) => { VectorBuilder::Binary(BinaryVectorBuilder::with_capacity(capacity)) } + _ => unimplemented!(), } } diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs index 7b3ac2abba42..dbd7607f9e37 100644 --- a/src/datatypes/src/vectors/helper.rs +++ b/src/datatypes/src/vectors/helper.rs @@ -172,7 +172,35 @@ impl Helper { ArrowDataType::Utf8 | ArrowDataType::LargeUtf8 => { Arc::new(StringVector::try_from_arrow_array(array)?) 
} + ArrowDataType::List(_) => Arc::new(ListVector::try_from_arrow_array(array)?), _ => unimplemented!("Arrow array datatype: {:?}", array.as_ref().data_type()), }) } + + pub fn try_into_vectors(arrays: &[ArrayRef]) -> Result<Vec<VectorRef>> { + arrays.iter().map(Self::try_into_vector).collect() + } +} + +#[cfg(test)] +mod tests { + use arrow::array::Int32Array; + + use super::*; + + #[test] + fn test_try_into_vectors() { + let arrays: Vec<ArrayRef> = vec![ + Arc::new(Int32Array::from_vec(vec![1])), + Arc::new(Int32Array::from_vec(vec![2])), + Arc::new(Int32Array::from_vec(vec![3])), + ]; + let vectors = Helper::try_into_vectors(&arrays); + assert!(vectors.is_ok()); + let vectors = vectors.unwrap(); + vectors.iter().for_each(|v| assert_eq!(1, v.len())); + assert_eq!(Value::Int32(1), vectors[0].get(0)); + assert_eq!(Value::Int32(2), vectors[1].get(0)); + assert_eq!(Value::Int32(3), vectors[2].get(0)); + } } diff --git a/src/datatypes/src/vectors/list.rs b/src/datatypes/src/vectors/list.rs new file mode 100644 index 000000000000..560dd93f8e72 --- /dev/null +++ b/src/datatypes/src/vectors/list.rs @@ -0,0 +1,286 @@ +use std::any::Any; +use std::sync::Arc; + +use arrow::array::{Array, ArrayRef, ListArray}; +use arrow::datatypes::DataType as ArrowDataType; +use serde_json::Value as JsonValue; +use snafu::prelude::*; + +use crate::error::Result; +use crate::prelude::*; +use crate::serialize::Serializable; +use crate::types::ListType; +use crate::value::ListValue; +use crate::vectors::{impl_try_from_arrow_array_for_vector, impl_validity_for_vector}; + +type ArrowListArray = ListArray<i32>; + +/// Vector of Lists, basically backed by Arrow's `ListArray`. +#[derive(Debug, Clone)] +pub struct ListVector { + array: ArrowListArray, + inner_data_type: ConcreteDataType, +} + +impl ListVector { + pub fn values_iter(&self) -> Box<dyn Iterator<Item = Result<VectorRef>> + '_> { + Box::new(self.array.values_iter().map(VectorHelper::try_into_vector)) + } +} + +impl Vector for ListVector { + fn data_type(&self) -> ConcreteDataType { + ConcreteDataType::List(ListType::new(self.inner_data_type.clone())) + } + + fn vector_type_name(&self) -> String { + "ListVector".to_string() + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn len(&self) -> usize { + self.array.len() + } + + fn to_arrow_array(&self) -> ArrayRef { + Arc::new(self.array.clone()) + } + + fn validity(&self) -> Validity { + impl_validity_for_vector!(self.array) + } + + fn memory_size(&self) -> usize { + let offsets_bytes = self.array.offsets().len() * std::mem::size_of::<i64>(); + let value_refs_bytes = self.array.values().len() * std::mem::size_of::<Arc<dyn Array>>(); + offsets_bytes + value_refs_bytes + } + + fn is_null(&self, row: usize) -> bool { + self.array.is_null(row) + } + + fn slice(&self, offset: usize, length: usize) -> VectorRef { + Arc::new(ListVector::from(self.array.slice(offset, length))) + } + + fn get(&self, index: usize) -> Value { + let array = &self.array.value(index); + let vector = VectorHelper::try_into_vector(array).unwrap_or_else(|_| { + panic!( + "arrow array with datatype {:?} cannot converted to our vector", + array.data_type() + ) + }); + let values = (0..vector.len()) + .map(|i| vector.get(i)) + .collect::<Vec<Value>>(); + Value::List(ListValue::new( + Some(Box::new(values)), + self.inner_data_type.clone(), + )) + } + + fn replicate(&self, _: &[usize]) -> VectorRef { + // ListVector can be a scalar vector for implementing this `replicate` method. 
However, + // that requires a lot of efforts, starting from not using Arrow's ListArray. + // Refer to Databend's `ArrayColumn` for more details. + unimplemented!() + } +} + +impl Serializable for ListVector { + fn serialize_to_json(&self) -> Result<Vec<JsonValue>> { + self.array + .iter() + .map(|v| match v { + None => Ok(JsonValue::Null), + Some(v) => VectorHelper::try_into_vector(v) + .and_then(|v| v.serialize_to_json()) + .map(JsonValue::Array), + }) + .collect() + } +} + +impl From<ArrowListArray> for ListVector { + fn from(array: ArrowListArray) -> Self { + let inner_data_type = ConcreteDataType::from_arrow_type(match array.data_type() { + ArrowDataType::List(field) => &field.data_type, + _ => unreachable!(), + }); + Self { + array, + inner_data_type, + } + } +} + +impl_try_from_arrow_array_for_vector!(ArrowListArray, ListVector); + +#[cfg(test)] +mod tests { + use arrow::array::{MutableListArray, MutablePrimitiveArray, TryExtend}; + + use super::*; + use crate::types::ListType; + + #[test] + fn test_list_vector() { + let data = vec![ + Some(vec![Some(1i32), Some(2), Some(3)]), + None, + Some(vec![Some(4), None, Some(6)]), + ]; + + let mut arrow_array = MutableListArray::<i32, MutablePrimitiveArray<i32>>::new(); + arrow_array.try_extend(data).unwrap(); + let arrow_array: ArrowListArray = arrow_array.into(); + + let list_vector = ListVector { + array: arrow_array.clone(), + inner_data_type: ConcreteDataType::int32_datatype(), + }; + assert_eq!( + ConcreteDataType::List(ListType::new(ConcreteDataType::int32_datatype())), + list_vector.data_type() + ); + assert_eq!("ListVector", list_vector.vector_type_name()); + assert_eq!(3, list_vector.len()); + assert!(!list_vector.is_null(0)); + assert!(list_vector.is_null(1)); + assert!(!list_vector.is_null(2)); + + assert_eq!( + arrow_array, + list_vector + .to_arrow_array() + .as_any() + .downcast_ref::<ArrowListArray>() + .unwrap() + .clone() + ); + assert_eq!( + Validity::Slots(arrow_array.validity().unwrap()), + list_vector.validity() + ); + assert_eq!( + arrow_array.offsets().len() * std::mem::size_of::<i64>() + + arrow_array.values().len() * std::mem::size_of::<Arc<dyn Array>>(), + list_vector.memory_size() + ); + + let slice = list_vector.slice(0, 2); + assert_eq!( + "ListArray[[1, 2, 3], None]", + format!("{:?}", slice.to_arrow_array()) + ); + + assert_eq!( + Value::List(ListValue::new( + Some(Box::new(vec![ + Value::Int32(1), + Value::Int32(2), + Value::Int32(3) + ])), + ConcreteDataType::int32_datatype() + )), + list_vector.get(0) + ); + assert_eq!( + Value::List(ListValue::new( + Some(Box::new(vec![])), + ConcreteDataType::int32_datatype() + )), + list_vector.get(1) + ); + assert_eq!( + Value::List(ListValue::new( + Some(Box::new(vec![ + Value::Int32(4), + Value::Null, + Value::Int32(6) + ])), + ConcreteDataType::int32_datatype() + )), + list_vector.get(2) + ); + } + + #[test] + fn test_from_arrow_array() { + let data = vec![ + Some(vec![Some(1u32), Some(2), Some(3)]), + None, + Some(vec![Some(4), None, Some(6)]), + ]; + + let mut arrow_array = MutableListArray::<i32, MutablePrimitiveArray<u32>>::new(); + arrow_array.try_extend(data).unwrap(); + let arrow_array: ArrowListArray = arrow_array.into(); + let array_ref: ArrayRef = Arc::new(arrow_array); + + let list_vector = ListVector::try_from_arrow_array(array_ref).unwrap(); + assert_eq!( + "ListVector { array: ListArray[[1, 2, 3], None, [4, None, 6]], inner_data_type: UInt32(UInt32) }", + format!("{:?}", list_vector) + ); + } + + #[test] + fn test_iter_list_vector_values() { + let data 
= vec![ + Some(vec![Some(1i64), Some(2), Some(3)]), + None, + Some(vec![Some(4), None, Some(6)]), + ]; + + let mut arrow_array = MutableListArray::<i32, MutablePrimitiveArray<i64>>::new(); + arrow_array.try_extend(data).unwrap(); + let arrow_array: ArrowListArray = arrow_array.into(); + + let list_vector = ListVector { + array: arrow_array, + inner_data_type: ConcreteDataType::int32_datatype(), + }; + let mut iter = list_vector.values_iter(); + assert_eq!( + "Int64[1, 2, 3]", + format!("{:?}", iter.next().unwrap().unwrap().to_arrow_array()) + ); + assert_eq!( + "Int64[]", + format!("{:?}", iter.next().unwrap().unwrap().to_arrow_array()) + ); + assert_eq!( + "Int64[4, None, 6]", + format!("{:?}", iter.next().unwrap().unwrap().to_arrow_array()) + ); + assert!(iter.next().is_none()) + } + + #[test] + fn test_serialize_to_json() { + let data = vec![ + Some(vec![Some(1i64), Some(2), Some(3)]), + None, + Some(vec![Some(4), None, Some(6)]), + ]; + + let mut arrow_array = MutableListArray::<i32, MutablePrimitiveArray<i64>>::new(); + arrow_array.try_extend(data).unwrap(); + let arrow_array: ArrowListArray = arrow_array.into(); + + let list_vector = ListVector { + array: arrow_array, + inner_data_type: ConcreteDataType::int32_datatype(), + }; + assert_eq!( + "Ok([Array([Number(1), Number(2), Number(3)]), Null, Array([Number(4), Null, Number(6)])])", + format!("{:?}", list_vector.serialize_to_json()) + ); + } +} diff --git a/src/log-store/Cargo.toml b/src/log-store/Cargo.toml index 4a79d18f99e7..5579e82ec7e8 100644 --- a/src/log-store/Cargo.toml +++ b/src/log-store/Cargo.toml @@ -25,4 +25,4 @@ tempdir = "0.3" tokio = { version = "1.18", features = ["full"] } [dev-dependencies] -rand = "0.8.5" +rand = "0.8" diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml index c9d2d5eb6a98..ada307bcee94 100644 --- a/src/query/Cargo.toml +++ b/src/query/Cargo.toml @@ -9,6 +9,7 @@ version="0.10" features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"] [dependencies] +arc-swap = "1.0" async-trait = "0.1" common-error = { path = "../common/error" } common-function = { path = "../common/function" } @@ -27,5 +28,8 @@ tokio = "1.0" sql = { path = "../sql" } [dev-dependencies] +num = "0.4" +num-traits = "0.2" +rand = "0.8" tokio = { version = "1.0", features = ["full"] } tokio-stream = "0.1" diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs index 74fc69dbddd9..e976f1ad9b69 100644 --- a/src/query/src/datafusion.rs +++ b/src/query/src/datafusion.rs @@ -7,6 +7,7 @@ mod planner; use std::sync::Arc; +use common_function::scalars::aggregate::AggregateFunctionMetaRef; use common_function::scalars::udf::create_udf; use common_function::scalars::FunctionRef; use common_query::prelude::ScalarUdf; @@ -87,6 +88,17 @@ impl QueryEngine for DatafusionQueryEngine { self.state.register_udf(udf); } + /// Note in SQL queries, aggregate names are looked up using + /// lowercase unless the query uses quotes. For example, + /// + /// `SELECT MY_UDAF(x)...` will look for an aggregate named `"my_udaf"` + /// `SELECT "my_UDAF"(x)` will look for an aggregate named `"my_UDAF"` + /// + /// So it's better to make UDAF name lowercase when creating one. 
+ fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) { + self.state.register_aggregate_function(func); + } + fn register_function(&self, func: FunctionRef) { self.state.register_udf(create_udf(func)); } diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs index 68cf31628ee9..498751ecc3b7 100644 --- a/src/query/src/datafusion/planner.rs +++ b/src/query/src/datafusion/planner.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use arrow::datatypes::DataType; +use common_query::logical_plan::create_aggregate_function; use datafusion::catalog::TableReference; use datafusion::datasource::TableProvider; use datafusion::physical_plan::udaf::AggregateUDF; @@ -80,10 +81,8 @@ impl ContextProvider for DfContextProviderAdapter { fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> { self.state - .df_context() - .state - .lock() - .get_aggregate_meta(name) + .aggregate_function(name) + .map(|func| Arc::new(create_aggregate_function(func.name(), func.create()).into())) } fn get_variable_type(&self, variable_names: &[String]) -> Option<DataType> { diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs index c9595ccfc325..9bd49893c292 100644 --- a/src/query/src/query_engine.rs +++ b/src/query/src/query_engine.rs @@ -3,6 +3,7 @@ mod state; use std::sync::Arc; +use common_function::scalars::aggregate::AggregateFunctionMetaRef; use common_function::scalars::{FunctionRef, FUNCTION_REGISTRY}; use common_query::prelude::ScalarUdf; use common_recordbatch::SendableRecordBatchStream; @@ -35,6 +36,8 @@ pub trait QueryEngine: Send + Sync { fn register_udf(&self, udf: ScalarUdf); + fn register_aggregate_function(&self, func: AggregateFunctionMetaRef); + fn register_function(&self, func: FunctionRef); } @@ -50,6 +53,10 @@ impl QueryEngineFactory { query_engine.register_function(func); } + for accumulator in FUNCTION_REGISTRY.aggregate_functions() { + query_engine.register_aggregate_function(accumulator); + } + Self { query_engine } } } diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index b906326bf551..6a086dd8cc96 100644 --- a/src/query/src/query_engine/state.rs +++ b/src/query/src/query_engine/state.rs @@ -1,6 +1,8 @@ +use std::collections::HashMap; use std::fmt; -use std::sync::Arc; +use std::sync::{Arc, RwLock}; +use common_function::scalars::aggregate::AggregateFunctionMetaRef; use common_query::prelude::ScalarUdf; use datafusion::prelude::{ExecutionConfig, ExecutionContext}; @@ -16,6 +18,7 @@ use crate::executor::Runtime; pub struct QueryEngineState { df_context: ExecutionContext, catalog_list: CatalogListRef, + aggregate_functions: Arc<RwLock<HashMap<String, AggregateFunctionMetaRef>>>, } impl fmt::Debug for QueryEngineState { @@ -41,6 +44,7 @@ impl QueryEngineState { Self { df_context, catalog_list, + aggregate_functions: Arc::new(RwLock::new(HashMap::new())), } } @@ -54,6 +58,23 @@ impl QueryEngineState { .insert(udf.name.clone(), Arc::new(udf.into_df_udf())); } + pub fn aggregate_function(&self, function_name: &str) -> Option<AggregateFunctionMetaRef> { + self.aggregate_functions + .read() + .unwrap() + .get(function_name) + .cloned() + } + + pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) { + // TODO(LFC): Return some error if there exists an aggregate function with the same name. + // Simply overwrite the old value for now. 
+ self.aggregate_functions + .write() + .unwrap() + .insert(func.name(), func); + } + #[inline] pub fn catalog_list(&self) -> &CatalogListRef { &self.catalog_list diff --git a/src/query/tests/my_sum_udaf_example.rs b/src/query/tests/my_sum_udaf_example.rs new file mode 100644 index 000000000000..9f3d93d7893d --- /dev/null +++ b/src/query/tests/my_sum_udaf_example.rs @@ -0,0 +1,267 @@ +use std::fmt::Debug; +use std::marker::PhantomData; +use std::sync::Arc; + +mod testing_table; + +use arc_swap::ArcSwapOption; +use common_function::scalars::aggregate::AggregateFunctionMeta; +use common_query::error::CreateAccumulatorSnafu; +use common_query::error::Result as QueryResult; +use common_query::logical_plan::Accumulator; +use common_query::logical_plan::AggregateFunctionCreator; +use common_query::prelude::*; +use common_recordbatch::util; +use datafusion::arrow_print; +use datafusion_common::record_batch::RecordBatch as DfRecordBatch; +use datatypes::prelude::*; +use datatypes::types::DataTypeBuilder; +use datatypes::types::PrimitiveType; +use datatypes::vectors::PrimitiveVector; +use datatypes::with_match_primitive_type_id; +use num_traits::AsPrimitive; +use query::catalog::memory::{MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider}; +use query::catalog::schema::SchemaProvider; +use query::catalog::{CatalogList, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use query::error::Result; +use query::query_engine::Output; +use query::QueryEngineFactory; +use table::TableRef; + +use crate::testing_table::TestingTable; + +#[derive(Debug, Default)] +struct MySumAccumulator<T, SumT> +where + T: Primitive + AsPrimitive<SumT>, + SumT: Primitive + std::ops::AddAssign, +{ + sum: SumT, + _phantom: PhantomData<T>, +} + +impl<T, SumT> MySumAccumulator<T, SumT> +where + T: Primitive + AsPrimitive<SumT>, + SumT: Primitive + std::ops::AddAssign, +{ + #[inline(always)] + fn add(&mut self, v: T) { + self.sum += v.as_(); + } + + #[inline(always)] + fn merge(&mut self, s: SumT) { + self.sum += s; + } +} + +#[derive(Debug, Default)] +struct MySumAccumulatorCreator { + input_type: ArcSwapOption<Vec<ConcreteDataType>>, +} + +impl AggregateFunctionCreator for MySumAccumulatorCreator { + fn creator(&self) -> AccumulatorCreatorFunction { + let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| { + let input_type = &types[0]; + with_match_primitive_type_id!( + input_type.logical_type_id(), + |$S| { + Ok(Box::new(MySumAccumulator::<$S, <$S as Primitive>::LargestType>::default())) + }, + { + let err_msg = format!( + "\"MY_SUM\" aggregate function not support data type {:?}", + input_type.logical_type_id(), + ); + CreateAccumulatorSnafu { err_msg }.fail()? 
+ } + ) + }); + creator + } + + fn input_types(&self) -> QueryResult<Vec<ConcreteDataType>> { + Ok(self.input_type + .load() + .as_ref() + .expect("input_type is not present, check if DataFusion has changed its UDAF execution logic") + .as_ref() + .clone()) + } + + fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> QueryResult<()> { + let old = self.input_type.swap(Some(Arc::new(input_types.clone()))); + if let Some(old) = old { + assert_eq!(old.len(), input_types.len()); + old.iter().zip(input_types.iter()).for_each(|(x, y)| + assert_eq!(x, y, "input type {:?} != {:?}, check if DataFusion has changed its UDAF execution logic", x, y) + ); + } + Ok(()) + } + + fn output_type(&self) -> QueryResult<ConcreteDataType> { + let input_type = &self.input_types()?[0]; + with_match_primitive_type_id!( + input_type.logical_type_id(), + |$S| { + Ok(PrimitiveType::<<$S as Primitive>::LargestType>::default().logical_type_id().data_type()) + }, + { + unreachable!() + } + ) + } + + fn state_types(&self) -> QueryResult<Vec<ConcreteDataType>> { + Ok(vec![self.output_type()?]) + } +} + +impl<T, SumT> Accumulator for MySumAccumulator<T, SumT> +where + T: Primitive + AsPrimitive<SumT>, + for<'a> T: Scalar<RefType<'a> = T>, + SumT: Primitive + std::ops::AddAssign, + for<'a> SumT: Scalar<RefType<'a> = SumT>, +{ + fn state(&self) -> QueryResult<Vec<Value>> { + Ok(vec![self.sum.into()]) + } + + fn update_batch(&mut self, values: &[VectorRef]) -> QueryResult<()> { + if values.is_empty() { + return Ok(()); + }; + let column = &values[0]; + let column: &<T as Scalar>::VectorType = unsafe { VectorHelper::static_cast(column) }; + for v in column.iter_data().flatten() { + self.add(v) + } + Ok(()) + } + + fn merge_batch(&mut self, states: &[VectorRef]) -> QueryResult<()> { + if states.is_empty() { + return Ok(()); + }; + let states = &states[0]; + let states: &<SumT as Scalar>::VectorType = unsafe { VectorHelper::static_cast(states) }; + for s in states.iter_data().flatten() { + self.merge(s) + } + Ok(()) + } + + fn evaluate(&self) -> QueryResult<Value> { + Ok(self.sum.into()) + } +} + +#[tokio::test] +async fn test_my_sum() -> Result<()> { + common_telemetry::init_default_ut_logging(); + + test_my_sum_with( + (1..=10).collect::<Vec<u32>>(), + vec![ + "+--------+", + "| my_sum |", + "+--------+", + "| 55 |", + "+--------+", + ], + ) + .await?; + test_my_sum_with( + (-10..=11).collect::<Vec<i32>>(), + vec![ + "+--------+", + "| my_sum |", + "+--------+", + "| 11 |", + "+--------+", + ], + ) + .await?; + test_my_sum_with( + vec![-1.0f32, 1.0, 2.0, 3.0, 4.0], + vec![ + "+--------+", + "| my_sum |", + "+--------+", + "| 9 |", + "+--------+", + ], + ) + .await?; + test_my_sum_with( + vec![u32::MAX, u32::MAX], + vec![ + "+------------+", + "| my_sum |", + "+------------+", + "| 8589934590 |", + "+------------+", + ], + ) + .await?; + Ok(()) +} + +async fn test_my_sum_with<T>(numbers: Vec<T>, expected: Vec<&str>) -> Result<()> +where + T: Primitive + DataTypeBuilder, +{ + let table_name = format!("{}_numbers", std::any::type_name::<T>()); + let column_name = format!("{}_number", std::any::type_name::<T>()); + + let testing_table = Arc::new(TestingTable::new( + &column_name, + Arc::new(PrimitiveVector::<T>::from_vec(numbers.clone())), + )); + + let factory = new_query_engine_factory(table_name.clone(), testing_table); + let engine = factory.query_engine(); + + engine.register_aggregate_function(Arc::new(AggregateFunctionMeta::new( + "my_sum", + Arc::new(|| Arc::new(MySumAccumulatorCreator::default())), + ))); + + 
let sql = format!( + "select MY_SUM({}) as my_sum from {}", + column_name, table_name + ); + let plan = engine.sql_to_plan(&sql)?; + + let output = engine.execute(&plan).await?; + let recordbatch_stream = match output { + Output::RecordBatch(batch) => batch, + _ => unreachable!(), + }; + let recordbatch = util::collect(recordbatch_stream).await.unwrap(); + let df_recordbatch = recordbatch + .into_iter() + .map(|r| r.df_recordbatch) + .collect::<Vec<DfRecordBatch>>(); + + let pretty_print = arrow_print::write(&df_recordbatch); + let pretty_print = pretty_print.lines().collect::<Vec<&str>>(); + assert_eq!(expected, pretty_print); + Ok(()) +} + +pub fn new_query_engine_factory(table_name: String, table: TableRef) -> QueryEngineFactory { + let schema_provider = Arc::new(MemorySchemaProvider::new()); + let catalog_provider = Arc::new(MemoryCatalogProvider::new()); + let catalog_list = Arc::new(MemoryCatalogList::default()); + + schema_provider.register_table(table_name, table).unwrap(); + catalog_provider.register_schema(DEFAULT_SCHEMA_NAME, schema_provider); + catalog_list.register_catalog(DEFAULT_CATALOG_NAME.to_string(), catalog_provider); + + QueryEngineFactory::new(catalog_list) +} diff --git a/src/query/tests/query_engine_test.rs b/src/query/tests/query_engine_test.rs index 3cc3aba47841..aa42e3e883a2 100644 --- a/src/query/tests/query_engine_test.rs +++ b/src/query/tests/query_engine_test.rs @@ -1,22 +1,33 @@ mod pow; +mod testing_table; use std::sync::Arc; use arrow::array::UInt32Array; use common_query::prelude::{create_udf, make_scalar_function, Volatility}; -use common_recordbatch::util; +use common_recordbatch::error::Result as RecordResult; +use common_recordbatch::{util, RecordBatch}; use datafusion::field_util::FieldExt; use datafusion::field_util::SchemaExt; use datafusion::logical_plan::LogicalPlanBuilder; -use datatypes::data_type::ConcreteDataType; -use query::catalog::memory; +use datatypes::for_all_ordered_primitive_types; +use datatypes::prelude::*; +use datatypes::types::DataTypeBuilder; +use datatypes::vectors::PrimitiveVector; +use num::NumCast; +use query::catalog::memory::{MemoryCatalogList, MemoryCatalogProvider, MemorySchemaProvider}; +use query::catalog::schema::SchemaProvider; +use query::catalog::{memory, CatalogList, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use query::error::Result; use query::plan::LogicalPlan; use query::query_engine::{Output, QueryEngineFactory}; +use query::QueryEngine; +use rand::Rng; use table::table::adapter::DfTableProviderAdapter; use table::table::numbers::NumbersTable; use crate::pow::pow; +use crate::testing_table::TestingTable; #[tokio::test] async fn test_datafusion_query_engine() -> Result<()> { @@ -110,3 +121,170 @@ async fn test_udf() -> Result<()> { Ok(()) } + +fn create_query_engine() -> Arc<dyn QueryEngine> { + let schema_provider = Arc::new(MemorySchemaProvider::new()); + let catalog_provider = Arc::new(MemoryCatalogProvider::new()); + let catalog_list = Arc::new(MemoryCatalogList::default()); + + macro_rules! 
create_testing_table { + ([], $( { $T:ty } ),*) => { + $( + let mut rng = rand::thread_rng(); + + let table_name = format!("{}_number_even", std::any::type_name::<$T>()); + let column_name = table_name.clone(); + let numbers = (1..=100).map(|_| rng.gen_range(<$T>::MIN..<$T>::MAX)).collect::<Vec<$T>>(); + let table = Arc::new(TestingTable::new( + &column_name, + Arc::new(PrimitiveVector::<$T>::from_vec(numbers.to_vec())), + )); + schema_provider.register_table(table_name, table).unwrap(); + + let table_name = format!("{}_number_odd", std::any::type_name::<$T>()); + let column_name = table_name.clone(); + let numbers = (1..=99).map(|_| rng.gen_range(<$T>::MIN..<$T>::MAX)).collect::<Vec<$T>>(); + let table = Arc::new(TestingTable::new( + &column_name, + Arc::new(PrimitiveVector::<$T>::from_vec(numbers.to_vec())), + )); + schema_provider.register_table(table_name, table).unwrap(); + )* + } + } + for_all_ordered_primitive_types! { create_testing_table } + + let table = Arc::new(TestingTable::new( + "f32_number", + Arc::new(PrimitiveVector::<f32>::from_vec(vec![1.0f32, 2.0, 3.0])), + )); + schema_provider + .register_table("f32_number".to_string(), table) + .unwrap(); + + let table = Arc::new(TestingTable::new( + "f64_number", + Arc::new(PrimitiveVector::<f64>::from_vec(vec![1.0f64, 2.0, 3.0])), + )); + schema_provider + .register_table("f64_number".to_string(), table) + .unwrap(); + + catalog_provider.register_schema(DEFAULT_SCHEMA_NAME, schema_provider); + catalog_list.register_catalog(DEFAULT_CATALOG_NAME.to_string(), catalog_provider); + + let factory = QueryEngineFactory::new(catalog_list); + factory.query_engine().clone() +} + +async fn get_numbers_from_table<T>(table_name: &str, engine: Arc<dyn QueryEngine>) -> Vec<T> +where + T: Primitive + DataTypeBuilder, + for<'a> T: Scalar<RefType<'a> = T>, +{ + let column_name = table_name; + let sql = format!("SELECT {} FROM {}", column_name, table_name); + let plan = engine.sql_to_plan(&sql).unwrap(); + + let output = engine.execute(&plan).await.unwrap(); + let recordbatch_stream = match output { + Output::RecordBatch(batch) => batch, + _ => unreachable!(), + }; + let numbers = util::collect(recordbatch_stream).await.unwrap(); + + let columns = numbers[0].df_recordbatch.columns(); + let column = VectorHelper::try_into_vector(&columns[0]).unwrap(); + let column: &<T as Scalar>::VectorType = unsafe { VectorHelper::static_cast(&column) }; + column.iter_data().flatten().collect::<Vec<T>>() +} + +#[tokio::test] +async fn test_median_aggregator() -> Result<()> { + common_telemetry::init_default_ut_logging(); + + let engine = create_query_engine(); + + test_median_failed::<f32>("f32_number", engine.clone()).await?; + test_median_failed::<f64>("f64_number", engine.clone()).await?; + + macro_rules! test_median { + ([], $( { $T:ty } ),*) => { + $( + let table_name = format!("{}_number_even", std::any::type_name::<$T>()); + test_median_success::<$T>(&table_name, engine.clone()).await?; + + let table_name = format!("{}_number_odd", std::any::type_name::<$T>()); + test_median_success::<$T>(&table_name, engine.clone()).await?; + )* + } + } + for_all_ordered_primitive_types! 
{ test_median } + Ok(()) +} + +async fn test_median_success<T>(table_name: &str, engine: Arc<dyn QueryEngine>) -> Result<()> +where + T: Primitive + Ord + DataTypeBuilder, + for<'a> T: Scalar<RefType<'a> = T>, +{ + let result = execute_median(table_name, engine.clone()).await.unwrap(); + assert_eq!(1, result.len()); + assert_eq!(result[0].df_recordbatch.num_columns(), 1); + assert_eq!(1, result[0].schema.arrow_schema().fields().len()); + assert_eq!("median", result[0].schema.arrow_schema().field(0).name()); + + let columns = result[0].df_recordbatch.columns(); + assert_eq!(1, columns.len()); + assert_eq!(columns[0].len(), 1); + let v = VectorHelper::try_into_vector(&columns[0]).unwrap(); + assert_eq!(1, v.len()); + let median = v.get(0); + + let mut numbers = get_numbers_from_table::<T>(table_name, engine.clone()).await; + numbers.sort(); + let len = numbers.len(); + let expected_median: Value = if len % 2 == 1 { + numbers[len / 2] + } else { + let a: f64 = NumCast::from(numbers[len / 2 - 1]).unwrap(); + let b: f64 = NumCast::from(numbers[len / 2]).unwrap(); + NumCast::from(a / 2.0 + b / 2.0).unwrap() + } + .into(); + assert_eq!(expected_median, median); + Ok(()) +} + +async fn test_median_failed<T>(table_name: &str, engine: Arc<dyn QueryEngine>) -> Result<()> +where + T: Primitive + DataTypeBuilder, +{ + let result = execute_median(table_name, engine).await; + assert!(result.is_err()); + let error = result.unwrap_err(); + assert!(error.to_string().contains(&format!( + "Failed to create accumulator: \"MEDIAN\" aggregate function not support data type {}", + T::type_name() + ))); + Ok(()) +} + +async fn execute_median( + table_name: &str, + engine: Arc<dyn QueryEngine>, +) -> RecordResult<Vec<RecordBatch>> { + let column_name = table_name; + let sql = format!( + "select MEDIAN({}) as median from {}", + column_name, table_name + ); + let plan = engine.sql_to_plan(&sql).unwrap(); + + let output = engine.execute(&plan).await.unwrap(); + let recordbatch_stream = match output { + Output::RecordBatch(batch) => batch, + _ => unreachable!(), + }; + util::collect(recordbatch_stream).await +} diff --git a/src/query/tests/testing_table.rs b/src/query/tests/testing_table.rs new file mode 100644 index 000000000000..6965af19086b --- /dev/null +++ b/src/query/tests/testing_table.rs @@ -0,0 +1,73 @@ +use std::any::Any; +use std::pin::Pin; +use std::sync::Arc; + +use common_query::prelude::Expr; +use common_recordbatch::error::Result as RecordBatchResult; +use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream}; +use datatypes::prelude::VectorRef; +use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; +use futures::task::{Context, Poll}; +use futures::Stream; +use table::error::Result; +use table::Table; + +#[derive(Debug, Clone)] +pub struct TestingTable { + records: RecordBatch, +} + +impl TestingTable { + pub fn new(column_name: &str, values: VectorRef) -> Self { + let column_schemas = vec![ColumnSchema::new(column_name, values.data_type(), false)]; + let schema = Arc::new(Schema::new(column_schemas)); + Self { + records: RecordBatch::new(schema, vec![values]).unwrap(), + } + } +} + +#[async_trait::async_trait] +impl Table for TestingTable { + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + self.records.schema.clone() + } + + async fn scan( + &self, + _projection: &Option<Vec<usize>>, + _filters: &[Expr], + _limit: Option<usize>, + ) -> Result<SendableRecordBatchStream> { + Ok(Box::pin(TestingRecordsStream { + schema: 
self.records.schema.clone(), + records: Some(self.records.clone()), + })) + } +} + +impl RecordBatchStream for TestingRecordsStream { + fn schema(&self) -> SchemaRef { + self.schema.clone() + } +} + +struct TestingRecordsStream { + schema: SchemaRef, + records: Option<RecordBatch>, +} + +impl Stream for TestingRecordsStream { + type Item = RecordBatchResult<RecordBatch>; + + fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + match self.records.take() { + Some(records) => Poll::Ready(Some(Ok(records))), + None => Poll::Ready(None), + } + } +}
feat
UDAF made generically (#91)
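The UDAF commit above keeps user-defined aggregate functions in an Arc<RwLock<HashMap<String, AggregateFunctionMetaRef>>> inside QueryEngineState, and its doc comment warns that unquoted SQL identifiers are lowercased before lookup. The sketch below is a simplified, standalone illustration of that registry pattern; Registry and AggregateFunctionMeta are stand-ins for the real types, and it lowercases names on registration to make the lookup rule explicit (the actual code only advises choosing lowercase names when creating a UDAF).

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Stand-in for the real AggregateFunctionMetaRef; only the name matters here.
#[derive(Debug, Clone)]
struct AggregateFunctionMeta {
    name: String,
}

#[derive(Default, Clone)]
struct Registry {
    // Shared, thread-safe map from function name to its metadata.
    functions: Arc<RwLock<HashMap<String, Arc<AggregateFunctionMeta>>>>,
}

impl Registry {
    // Register under the lowercase name so that unquoted SQL identifiers
    // (which planners lowercase) resolve to it.
    fn register(&self, meta: Arc<AggregateFunctionMeta>) {
        self.functions
            .write()
            .unwrap()
            .insert(meta.name.to_lowercase(), meta);
    }

    // Lookup used when the planner meets an aggregate call it does not know.
    fn get(&self, name: &str) -> Option<Arc<AggregateFunctionMeta>> {
        self.functions.read().unwrap().get(name).cloned()
    }
}

fn main() {
    let registry = Registry::default();
    registry.register(Arc::new(AggregateFunctionMeta {
        name: "my_sum".to_string(),
    }));

    // `SELECT MY_SUM(x) ...` is looked up as "my_sum" after lowercasing.
    assert!(registry.get("my_sum").is_some());
    // A quoted identifier such as `"MY_SUM"` keeps its case and therefore misses.
    assert!(registry.get("MY_SUM").is_none());
}

With this shape, planner-side lookups (like get_aggregate_meta in the DataFusion context provider above) only take the read lock, while registration takes the write lock.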
7f075d2c0fec887c696fda60e9ac2b9cf7a6ce5c
2022-04-25 14:33:05
evenyag
refactor: Rename scalar mod to scalars
false
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs index 513520cf5ec3..7f3e4ebb8ec2 100644 --- a/src/datatypes/src/lib.rs +++ b/src/datatypes/src/lib.rs @@ -2,7 +2,7 @@ mod data_type; pub mod prelude; -mod scalar; +mod scalars; mod schema; pub mod type_id; mod types; diff --git a/src/datatypes/src/scalar.rs b/src/datatypes/src/scalars.rs similarity index 100% rename from src/datatypes/src/scalar.rs rename to src/datatypes/src/scalars.rs diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs index 6b27c5a85529..bb6d5dc77550 100644 --- a/src/datatypes/src/vectors/binary.rs +++ b/src/datatypes/src/vectors/binary.rs @@ -6,7 +6,7 @@ use arrow2::array::BinaryValueIter; use arrow2::bitmap::utils::ZipValidity; use crate::data_type::DataTypeRef; -use crate::scalar::{ScalarVector, ScalarVectorBuilder}; +use crate::scalars::{ScalarVector, ScalarVectorBuilder}; use crate::types::binary_type::BinaryType; use crate::vectors::Vector; use crate::{LargeBinaryArray, MutableLargeBinaryArray}; diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs index e7fa02416964..ac1e99447ddf 100644 --- a/src/datatypes/src/vectors/primitive.rs +++ b/src/datatypes/src/vectors/primitive.rs @@ -7,7 +7,7 @@ use arrow2::array::{MutablePrimitiveArray, PrimitiveArray}; use arrow2::bitmap::utils::ZipValidity; use crate::data_type::DataTypeRef; -use crate::scalar::{ScalarVector, ScalarVectorBuilder}; +use crate::scalars::{ScalarVector, ScalarVectorBuilder}; use crate::types::primitive_traits::Primitive; use crate::types::primitive_type::DataTypeBuilder; use crate::vectors::Vector;
refactor
Rename scalar mod to scalars
97be052b33d8eeaa43cd0edd7bcf3883c2c7ac32
2022-08-04 20:41:39
Ning Sun
feat: update tonic/prost and simplify build requirements (#130)
false
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 49bf57001c24..4f9b1d2420d8 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -2,8 +2,8 @@ on: pull_request: push: branches: - - 'main' - - 'develop' + - "main" + - "develop" name: Code coverage @@ -15,6 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - uses: arduino/setup-protoc@v1 - name: Install toolchain uses: actions-rs/toolchain@v1 with: diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index 0dd3e90a6303..c47d7b9cdcc5 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -11,6 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - uses: arduino/setup-protoc@v1 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -26,6 +27,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - uses: arduino/setup-protoc@v1 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -46,6 +48,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - uses: arduino/setup-protoc@v1 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -62,6 +65,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - uses: arduino/setup-protoc@v1 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/Cargo.lock b/Cargo.lock index 10fabc50ebbb..98017cb3f365 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,8 +71,8 @@ checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" name = "api" version = "0.1.0" dependencies = [ - "prost", - "tonic", + "prost 0.11.0", + "tonic 0.8.0", "tonic-build", ] @@ -621,16 +621,7 @@ dependencies = [ "common-error", "snafu", "tokio", - "tonic", -] - -[[package]] -name = "cmake" -version = "0.1.48" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" -dependencies = [ - "cc", + "tonic 0.8.0", ] [[package]] @@ -777,9 +768,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24cb05777feccbb2642d4f2df44d0505601a2cd88ca517d8c913f263a5a8dc8b" dependencies = [ - "prost", - "prost-types", - "tonic", + "prost 0.10.4", + "prost-types 0.10.1", + "tonic 0.7.2", "tracing-core", ] @@ -795,13 +786,13 @@ dependencies = [ "futures", "hdrhistogram", "humantime", - "prost-types", + "prost-types 0.10.1", "serde", "serde_json", "thread_local", "tokio", "tokio-stream", - "tonic", + "tonic 0.7.2", "tracing", "tracing-core", "tracing-subscriber", @@ -1188,7 +1179,7 @@ dependencies = [ "tempdir", "tokio", "tokio-stream", - "tonic", + "tonic 0.8.0", "tower", "tower-http", ] @@ -2904,26 +2895,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.10.1", +] + +[[package]] +name = "prost" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +dependencies = [ + "bytes", + "prost-derive 0.11.0", ] [[package]] name = "prost-build" -version = "0.10.4" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" dependencies = [ "bytes", - 
"cfg-if", - "cmake", "heck 0.4.0", "itertools", "lazy_static", "log", "multimap", "petgraph", - "prost", - "prost-types", + "prost 0.11.0", + "prost-types 0.11.1", "regex", "tempfile", "which", @@ -2942,6 +2941,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.10.1" @@ -2949,7 +2961,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ "bytes", - "prost", + "prost 0.10.4", +] + +[[package]] +name = "prost-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +dependencies = [ + "bytes", + "prost 0.11.0", ] [[package]] @@ -3587,7 +3609,7 @@ dependencies = [ "log-store", "object-store", "planus", - "prost", + "prost 0.11.0", "rand 0.8.5", "regex", "serde", @@ -3596,7 +3618,7 @@ dependencies = [ "store-api", "tempdir", "tokio", - "tonic", + "tonic 0.8.0", "tonic-build", "uuid", ] @@ -3897,10 +3919,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg", "bytes", "libc", "memchr", @@ -4019,8 +4042,40 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost", - "prost-derive", + "prost 0.10.4", + "prost-derive 0.10.1", + "tokio", + "tokio-stream", + "tokio-util 0.7.1", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", +] + +[[package]] +name = "tonic" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "498f271adc46acce75d66f639e4d35b31b2394c295c82496727dafa16d465dd2" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.11.0", + "prost-derive 0.11.0", "tokio", "tokio-stream", "tokio-util 0.7.1", @@ -4033,9 +4088,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1" +checksum = "2fbcd2800e34e743b9ae795867d5f77b535d3a3be69fd731e39145719752df8c" dependencies = [ "prettyplease", "proc-macro2", diff --git a/README.md b/README.md index 9e92365081cc..d44fdb4a2e3c 100644 --- a/README.md +++ b/README.md @@ -9,23 +9,16 @@ GreptimeDB: the next-generation hybrid timeseries/analytics processing database ### Prerequisites To compile GreptimeDB from source, you'll need the following: - Rust -- C++ toolchain -- cmake +- Protobuf - OpenSSL #### Rust The easiest way to install Rust is to use [`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and install correct Rust version for you. 
-#### C++ toolchain -The [`prost-build`](https://github.com/tokio-rs/prost/tree/master/prost-build) dependency requires `C++ toolchain` and `cmake` to build its bundled `protoc`. For more info on what the required dependencies are check [`here`](https://github.com/protocolbuffers/protobuf/blob/master/src/README.md). - -#### cmake -Follow the instructions for your operating system on the [`cmake`](https://cmake.org/install/) site. - -For macOS users, you can also use `homebrew` to install `cmake`. -```bash -brew install cmake -``` +#### Protobuf +`protoc` is required for compiling `.proto` files. `protobuf` is available from +major package manager on macos and linux distributions. You can find an +installation instructions [here](https://grpc.io/docs/protoc-installation/). #### OpenSSL @@ -60,4 +53,3 @@ OR // Start datanode with `log-dir` and `log-level` options. cargo run -- --log-dir=logs --log-level=debug datanode start ``` - diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 7def8aec3bac..5931652f803e 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -6,8 +6,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.10" -tonic = "0.7" +prost = "0.11" +tonic = "0.8" [build-dependencies] -tonic-build = "0.7" +tonic-build = "0.8" diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml index 8e789ec7448a..a7b9cbfcf8ab 100644 --- a/src/client/Cargo.toml +++ b/src/client/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" api = { path = "../api" } common-error = { path = "../common/error" } snafu = { version = "0.7", features = ["backtraces"] } -tonic = "0.7" +tonic = "0.8" [dev-dependencies] tokio = { version = "1.0", features = ["full"] } diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml index 5d8915d28367..1c2afbf6f64c 100644 --- a/src/datanode/Cargo.toml +++ b/src/datanode/Cargo.toml @@ -28,7 +28,7 @@ store-api = { path = "../store-api" } table = { path = "../table" } table-engine = { path = "../table-engine" } tokio = { version = "1.18", features = ["full"] } -tonic = "0.7" +tonic = "0.8" tokio-stream = { version = "0.1.8", features = ["net"] } tower = { version = "0.4", features = ["full"]} tower-http = { version ="0.3", features = ["full"]} diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml index 0ceaff323258..6214a1c10db7 100644 --- a/src/storage/Cargo.toml +++ b/src/storage/Cargo.toml @@ -23,14 +23,14 @@ lazy_static = "1.4" log-store = { path = "../log-store" } object-store = { path = "../object-store" } planus = "0.2" -prost = "0.10" +prost = "0.11" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" snafu = { version = "0.7", features = ["backtraces"] } store-api = { path = "../store-api" } regex = "1.5" tokio = { version = "1.18", features = ["full"] } -tonic = "0.7" +tonic = "0.8" uuid = { version = "1.1" , features=["v4"]} [dev-dependencies] @@ -40,7 +40,7 @@ rand = "0.8" tempdir = "0.3" [build-dependencies] -tonic-build = "0.7" +tonic-build = "0.8" [[bench]] name = "bench_main"
feat
update tonic/prost and simplify build requirements (#130)
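The build-requirement change above swaps the bundled protoc (which needed a C++ toolchain and cmake via prost-build 0.10) for a system-installed protoc required by prost-build 0.11 / tonic-build 0.8. Below is a minimal sketch of the kind of build script that triggers this requirement; the proto path is hypothetical and only illustrates that code generation shells out to protoc at build time.

// build.rs (sketch): tonic-build 0.8 delegates to prost-build 0.11, which
// invokes the `protoc` found on PATH (or pointed at by the PROTOC env var)
// instead of building a bundled copy.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical proto file; any .proto compiled this way needs protoc installed.
    tonic_build::compile_protos("proto/greet.proto")?;
    Ok(())
}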
e021da2eeec8bf2fa14a22805aee5d798895e60d
2023-04-11 14:34:35
Eugene Tolbakov
feat(promql): add holt_winters initial implementation (#1342)
false
diff --git a/src/promql/src/functions.rs b/src/promql/src/functions.rs index 995a137148bd..a11cff608162 100644 --- a/src/promql/src/functions.rs +++ b/src/promql/src/functions.rs @@ -16,6 +16,7 @@ mod aggr_over_time; mod changes; mod deriv; mod extrapolate_rate; +mod holt_winters; mod idelta; mod quantile; mod resets; @@ -32,6 +33,7 @@ use datafusion::error::DataFusionError; use datafusion::physical_plan::ColumnarValue; pub use deriv::Deriv; pub use extrapolate_rate::{Delta, Increase, Rate}; +pub use holt_winters::HoltWinters; pub use idelta::IDelta; pub use quantile::QuantileOverTime; pub use resets::Resets; diff --git a/src/promql/src/functions/holt_winters.rs b/src/promql/src/functions/holt_winters.rs new file mode 100644 index 000000000000..f02276b4b019 --- /dev/null +++ b/src/promql/src/functions/holt_winters.rs @@ -0,0 +1,381 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of [`holt_winters`](https://prometheus.io/docs/prometheus/latest/querying/functions/#holt_winters) in PromQL. Refer to the [original +//! implementation](https://github.com/prometheus/prometheus/blob/8dba9163f1e923ec213f0f4d5c185d9648e387f0/promql/functions.go#L299). + +use std::sync::Arc; + +use datafusion::arrow::array::Float64Array; +use datafusion::arrow::datatypes::TimeUnit; +use datafusion::common::DataFusionError; +use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility}; +use datafusion::physical_plan::ColumnarValue; +use datatypes::arrow::array::Array; +use datatypes::arrow::datatypes::DataType; + +use crate::error; +use crate::functions::extract_array; +use crate::range_array::RangeArray; + +/// There are 3 variants of smoothing functions: +/// 1) "Simple exponential smoothing": only the `level` component (the weighted average of the observations) is used to make forecasts. +/// This method is applied for time-series data that does not exhibit trend or seasonality. +/// 2) "Holt's linear method" (a.k.a. "double exponential smoothing"): `level` and `trend` components are used to make forecasts. +/// This method is applied for time-series data that exhibits trend but not seasonality. +/// 3) "Holt-Winter's method" (a.k.a. "triple exponential smoothing"): `level`, `trend`, and `seasonality` are used to make forecasts. +/// This method is applied for time-series data that exhibits both trend and seasonality. +/// +/// In order to keep the parity with the Prometheus functions we had to follow the same naming ("HoltWinters"), however +/// the "Holt's linear"("double exponential smoothing") suits better and reflects implementation. +/// There's the [discussion](https://github.com/prometheus/prometheus/issues/2458) in the Prometheus Github that dates back +/// to 2017 highlighting the naming/implementation mismatch. 
+pub struct HoltWinters { + sf: f64, + tf: f64, +} + +impl HoltWinters { + fn new(sf: f64, tf: f64) -> Self { + Self { sf, tf } + } + + pub const fn name() -> &'static str { + "prom_holt_winters" + } + + // time index column and value column + fn input_type() -> Vec<DataType> { + vec![ + RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)), + RangeArray::convert_data_type(DataType::Float64), + ] + } + + fn return_type() -> DataType { + DataType::Float64 + } + + pub fn scalar_udf(level: f64, trend: f64) -> ScalarUDF { + ScalarUDF { + name: Self::name().to_string(), + signature: Signature::new( + TypeSignature::Exact(Self::input_type()), + Volatility::Immutable, + ), + return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))), + fun: Arc::new(move |input| Self::new(level, trend).calc(input)), + } + } + + fn calc(&self, input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> { + // construct matrix from input. + // The third one is level param, the fourth - trend param which are included in fields. + assert_eq!(input.len(), 2); + + let ts_array = extract_array(&input[0])?; + let value_array = extract_array(&input[1])?; + + let ts_range: RangeArray = RangeArray::try_new(ts_array.data().clone().into())?; + let value_range: RangeArray = RangeArray::try_new(value_array.data().clone().into())?; + + error::ensure( + ts_range.len() == value_range.len(), + DataFusionError::Execution(format!( + "{}: input arrays should have the same length, found {} and {}", + Self::name(), + ts_range.len(), + value_range.len() + )), + )?; + error::ensure( + ts_range.value_type() == DataType::Timestamp(TimeUnit::Millisecond, None), + DataFusionError::Execution(format!( + "{}: expect TimestampMillisecond as time index array's type, found {}", + Self::name(), + ts_range.value_type() + )), + )?; + error::ensure( + value_range.value_type() == DataType::Float64, + DataFusionError::Execution(format!( + "{}: expect Float64 as value array's type, found {}", + Self::name(), + value_range.value_type() + )), + )?; + + // calculation + let mut result_array = Vec::with_capacity(ts_range.len()); + for index in 0..ts_range.len() { + let timestamps = ts_range.get(index).unwrap(); + let values = value_range.get(index).unwrap(); + let values = values + .as_any() + .downcast_ref::<Float64Array>() + .unwrap() + .values(); + error::ensure( + timestamps.len() == values.len(), + DataFusionError::Execution(format!( + "{}: input arrays should have the same length, found {} and {}", + Self::name(), + timestamps.len(), + values.len() + )), + )?; + result_array.push(holt_winter_impl(values, self.sf, self.tf)); + } + + let result = ColumnarValue::Array(Arc::new(Float64Array::from_iter(result_array))); + Ok(result) + } +} + +fn calc_trend_value(i: usize, tf: f64, s0: f64, s1: f64, b: f64) -> f64 { + if i == 0 { + return b; + } + let x = tf * (s1 - s0); + let y = (1.0 - tf) * b; + x + y +} + +/// Refer to https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L299 +fn holt_winter_impl(values: &[f64], sf: f64, tf: f64) -> Option<f64> { + if sf.is_nan() || tf.is_nan() || values.is_empty() { + return Some(f64::NAN); + } + if sf < 0.0 || tf < 0.0 { + return Some(f64::NEG_INFINITY); + } + if sf > 1.0 || tf > 1.0 { + return Some(f64::INFINITY); + } + + let l = values.len(); + if l <= 2 { + // Can't do the smoothing operation with less than two points. 
+ return Some(f64::NAN); + } + + let values = values.to_vec(); + + let mut s0 = 0.0; + let mut s1 = values[0]; + let mut b = values[1] - values[0]; + + for (i, value) in values.iter().enumerate().skip(1) { + // Scale the raw value against the smoothing factor. + let x = sf * value; + // Scale the last smoothed value with the trend at this point. + b = calc_trend_value(i - 1, tf, s0, s1, b); + let y = (1.0 - sf) * (s1 + b); + s0 = s1; + s1 = x + y; + } + Some(s1) +} + +#[cfg(test)] +mod tests { + use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray}; + + use super::*; + use crate::functions::test_util::simple_range_udf_runner; + + #[test] + fn test_holt_winter_impl_empty() { + let sf = 0.5; + let tf = 0.5; + let values = &[]; + assert!(holt_winter_impl(values, sf, tf).unwrap().is_nan()); + + let values = &[1.0, 2.0]; + assert!(holt_winter_impl(values, sf, tf).unwrap().is_nan()); + } + + #[test] + fn test_holt_winter_impl_nan() { + let values = &[1.0, 2.0, 3.0]; + let sf = f64::NAN; + let tf = 0.5; + assert!(holt_winter_impl(values, sf, tf).unwrap().is_nan()); + + let values = &[1.0, 2.0, 3.0]; + let sf = 0.5; + let tf = f64::NAN; + assert!(holt_winter_impl(values, sf, tf).unwrap().is_nan()); + } + + #[test] + fn test_holt_winter_impl_validation_rules() { + let values = &[1.0, 2.0, 3.0]; + let sf = -0.5; + let tf = 0.5; + assert_eq!(holt_winter_impl(values, sf, tf).unwrap(), f64::NEG_INFINITY); + + let values = &[1.0, 2.0, 3.0]; + let sf = 0.5; + let tf = -0.5; + assert_eq!(holt_winter_impl(values, sf, tf).unwrap(), f64::NEG_INFINITY); + + let values = &[1.0, 2.0, 3.0]; + let sf = 1.5; + let tf = 0.5; + assert_eq!(holt_winter_impl(values, sf, tf).unwrap(), f64::INFINITY); + + let values = &[1.0, 2.0, 3.0]; + let sf = 0.5; + let tf = 1.5; + assert_eq!(holt_winter_impl(values, sf, tf).unwrap(), f64::INFINITY); + } + + #[test] + fn test_holt_winter_impl() { + let sf = 0.5; + let tf = 0.1; + let values = &[1.0, 2.0, 3.0, 4.0, 5.0]; + assert_eq!(holt_winter_impl(values, sf, tf), Some(5.0)); + let values = &[50.0, 52.0, 95.0, 59.0, 52.0, 45.0, 38.0, 10.0, 47.0, 40.0]; + assert_eq!(holt_winter_impl(values, sf, tf), Some(38.18119566835938)); + } + + #[test] + fn test_prom_holt_winter_monotonic() { + let ranges = [(0, 5)]; + let ts_array = Arc::new(TimestampMillisecondArray::from_iter( + [1000i64, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 17000] + .into_iter() + .map(Some), + )); + let values_array = Arc::new(Float64Array::from_iter([1.0, 2.0, 3.0, 4.0, 5.0])); + let ts_range_array = RangeArray::from_ranges(ts_array, ranges).unwrap(); + let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap(); + simple_range_udf_runner( + HoltWinters::scalar_udf(0.5, 0.1), + ts_range_array, + value_range_array, + vec![Some(5.0)], + ); + } + + #[test] + fn test_prom_holt_winter_non_monotonic() { + let ranges = [(0, 10)]; + let ts_array = Arc::new(TimestampMillisecondArray::from_iter( + [ + 1000i64, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 17000, 19000, + ] + .into_iter() + .map(Some), + )); + let values_array = Arc::new(Float64Array::from_iter([ + 50.0, 52.0, 95.0, 59.0, 52.0, 45.0, 38.0, 10.0, 47.0, 40.0, + ])); + let ts_range_array = RangeArray::from_ranges(ts_array, ranges).unwrap(); + let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap(); + simple_range_udf_runner( + HoltWinters::scalar_udf(0.5, 0.1), + ts_range_array, + value_range_array, + vec![Some(38.18119566835938)], + ); + } + + #[test] + fn test_promql_trends() { + let ranges = vec![(0, 
801)]; + + let trends = vec![ + // positive trends https://github.com/prometheus/prometheus/blob/8dba9163f1e923ec213f0f4d5c185d9648e387f0/promql/testdata/functions.test#L475 + ("0+10x1000 100+30x1000", 8000.0), + ("0+20x1000 200+30x1000", 16000.0), + ("0+30x1000 300+80x1000", 24000.0), + ("0+40x2000", 32000.0), + // negative trends https://github.com/prometheus/prometheus/blob/8dba9163f1e923ec213f0f4d5c185d9648e387f0/promql/testdata/functions.test#L488 + ("8000-10x1000", 0.0), + ("0-20x1000", -16000.0), + ("0+30x1000 300-80x1000", 24000.0), + ("0-40x1000 0+40x1000", -32000.0), + ]; + + for (query, expected) in trends { + let (ts_range_array, value_range_array) = + create_ts_and_value_range_arrays(query, ranges.clone()); + simple_range_udf_runner( + HoltWinters::scalar_udf(0.01, 0.1), + ts_range_array, + value_range_array, + vec![Some(expected)], + ); + } + } + + fn create_ts_and_value_range_arrays( + input: &str, + ranges: Vec<(u32, u32)>, + ) -> (RangeArray, RangeArray) { + let promql_range = create_test_range_from_promql_series(input); + let ts_array = Arc::new(TimestampMillisecondArray::from_iter( + (0..(promql_range.len() as i64)).map(Some), + )); + let values_array = Arc::new(Float64Array::from_iter(promql_range)); + let ts_range_array = RangeArray::from_ranges(ts_array, ranges.clone()).unwrap(); + let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap(); + (ts_range_array, value_range_array) + } + + /// Converts a prometheus functions test series into a vector of f64 element with respect to resets and trend direction + /// The input example: "0+10x1000 100+30x1000" + fn create_test_range_from_promql_series(input: &str) -> Vec<f64> { + input.split(' ').map(parse_promql_series_entry).fold( + Vec::new(), + |mut acc, (start, end, step, operation)| { + if operation.eq("+") { + let iter = (start..=((step * end) + start)) + .step_by(step as usize) + .map(|x| x as f64); + acc.extend(iter); + } else { + let iter = (((-step * end) + start)..=start) + .rev() + .step_by(step as usize) + .map(|x| x as f64); + acc.extend(iter); + }; + acc + }, + ) + } + + /// Converts a prometheus functions test series entry into separate parts to create a range with a step + /// The input example: "100+30x1000" + fn parse_promql_series_entry(input: &str) -> (i32, i32, i32, &str) { + let mut parts = input.split('x'); + let start_operation_step = parts.next().unwrap(); + let operation = start_operation_step + .split(char::is_numeric) + .find(|&x| !x.is_empty()) + .unwrap(); + let start_step = start_operation_step + .split(operation) + .map(|s| s.parse::<i32>().unwrap()) + .collect::<Vec<_>>(); + let start = *start_step.first().unwrap(); + let step = *start_step.last().unwrap(); + let end = parts.next().unwrap().parse::<i32>().unwrap(); + (start, end, step, operation) + } +} diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs index 5620a8220d1f..b75fdad16976 100644 --- a/src/promql/src/planner.rs +++ b/src/promql/src/planner.rs @@ -51,9 +51,9 @@ use crate::extension_plan::{ EmptyMetric, InstantManipulate, Millisecond, RangeManipulate, SeriesDivide, SeriesNormalize, }; use crate::functions::{ - AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, IDelta, Increase, LastOverTime, - MaxOverTime, MinOverTime, PresentOverTime, QuantileOverTime, Rate, Resets, StddevOverTime, - StdvarOverTime, SumOverTime, + AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, HoltWinters, IDelta, Increase, + LastOverTime, MaxOverTime, MinOverTime, PresentOverTime, QuantileOverTime, Rate, 
Resets, + StddevOverTime, StdvarOverTime, SumOverTime, }; const LEFT_PLAN_JOIN_ALIAS: &str = "lhs"; @@ -796,6 +796,26 @@ impl PromPlanner { }; ScalarFunc::Udf(QuantileOverTime::scalar_udf(quantile_expr)) } + "holt_winters" => { + let sf_exp = match other_input_exprs.get(0) { + Some(DfExpr::Literal(ScalarValue::Float64(Some(sf)))) => *sf, + other => UnexpectedPlanExprSnafu { + desc: format!( + "expect f64 literal as smoothing factor, but found {:?}", + other + ), + } + .fail()?, + }; + let tf_exp = match other_input_exprs.get(1) { + Some(DfExpr::Literal(ScalarValue::Float64(Some(tf)))) => *tf, + other => UnexpectedPlanExprSnafu { + desc: format!("expect f64 literal as trend factor, but found {:?}", other), + } + .fail()?, + }; + ScalarFunc::Udf(HoltWinters::scalar_udf(sf_exp, tf_exp)) + } _ => ScalarFunc::DataFusionBuiltin( BuiltinScalarFunction::from_str(func.name).map_err(|_| { UnsupportedExprSnafu {
feat
add holt_winters initial implementation (#1342)
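The holt_winters function added above is, despite the name, Holt's linear method (double exponential smoothing): a level s and a trend b are updated per sample with smoothing factor sf and trend factor tf. The standalone sketch below restates that recurrence in plain Rust; holt_linear is an illustrative name, and the NaN/infinity handling for out-of-range factors in the real holt_winter_impl is omitted.

// Core recurrence of the PromQL holt_winters function (sketch):
//   b_t = tf * (s_{t-1} - s_{t-2}) + (1 - tf) * b_{t-1}
//   s_t = sf * x_t   + (1 - sf) * (s_{t-1} + b_t)
fn holt_linear(values: &[f64], sf: f64, tf: f64) -> Option<f64> {
    // Like the real implementation, at least three points are required.
    if values.len() <= 2 {
        return None;
    }
    let mut s0 = 0.0;                  // level two steps back (placeholder until the second pass)
    let mut s1 = values[0];            // current level, seeded with the first sample
    let mut b = values[1] - values[0]; // initial trend: slope of the first two samples
    for (i, &x) in values.iter().enumerate().skip(1) {
        if i > 1 {
            // Update the trend from the previous two smoothed levels.
            b = tf * (s1 - s0) + (1.0 - tf) * b;
        }
        // Blend the raw sample with the trend-extended previous level.
        let next = sf * x + (1.0 - sf) * (s1 + b);
        s0 = s1;
        s1 = next;
    }
    Some(s1)
}

fn main() {
    // Matches the diff's own test: a perfectly linear series is forecast exactly,
    // so the smoothed value equals the last sample.
    assert_eq!(holt_linear(&[1.0, 2.0, 3.0, 4.0, 5.0], 0.5, 0.1), Some(5.0));
}

In the PromQL surface syntax, holt_winters(metric[5m], 0.5, 0.1) supplies sf and tf as the two scalar literals that the planner extracts in the "holt_winters" arm shown above.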
c2e1b0857cc9fcb8393d2d98d3b459797f9a6411
2022-11-07 13:39:01
fys
refactor: optimize channel_manager (#401)
false
diff --git a/Cargo.lock b/Cargo.lock index 6039d477e66d..22e51f0b7356 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,6 +93,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "ansi_term" version = "0.12.1" @@ -797,6 +803,33 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" +[[package]] +name = "ciborium" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" + +[[package]] +name = "ciborium-ll" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clang-sys" version = "1.4.0" @@ -1060,7 +1093,10 @@ dependencies = [ "common-base", "common-error", "common-runtime", + "criterion 0.4.0", + "dashmap", "datafusion", + "rand 0.8.5", "snafu", "tokio", "tonic", @@ -1271,7 +1307,7 @@ dependencies = [ "atty", "cast", "clap 2.34.0", - "criterion-plot", + "criterion-plot 0.4.5", "csv", "itertools", "lazy_static", @@ -1288,6 +1324,32 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +dependencies = [ + "anes", + "atty", + "cast", + "ciborium", + "clap 3.2.22", + "criterion-plot 0.5.0", + "itertools", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + [[package]] name = "criterion-plot" version = "0.4.5" @@ -1298,6 +1360,16 @@ dependencies = [ "itertools", ] +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + [[package]] name = "crossbeam" version = "0.8.2" @@ -1440,6 +1512,19 @@ dependencies = [ "syn", ] +[[package]] +name = "dashmap" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +dependencies = [ + "cfg-if", + "hashbrown", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "datafusion" version = "7.0.0" @@ -5215,7 +5300,7 @@ dependencies = [ "common-runtime", "common-telemetry", "common-time", - "criterion", + "criterion 0.3.6", "datatypes", "futures", "futures-util", diff --git a/src/common/grpc/Cargo.toml b/src/common/grpc/Cargo.toml index 1bbd38e86c13..e72abc1996f5 100644 --- a/src/common/grpc/Cargo.toml +++ b/src/common/grpc/Cargo.toml @@ -9,6 +9,7 @@ async-trait = "0.1" common-base = { path = "../base" } common-error = { path = "../error" } common-runtime = { path = "../runtime" } +dashmap = "5.4" datafusion = { git = 
"https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] } snafu = { version = "0.7", features = ["backtraces"] } tokio = { version = "1.0", features = ["full"] } @@ -19,3 +20,11 @@ tower = "0.4" package = "arrow2" version = "0.10" features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"] + +[dev-dependencies] +criterion = "0.4" +rand = "0.8" + +[[bench]] +name = "bench_main" +harness = false diff --git a/src/common/grpc/benches/bench_main.rs b/src/common/grpc/benches/bench_main.rs new file mode 100644 index 000000000000..b1e7a22bf242 --- /dev/null +++ b/src/common/grpc/benches/bench_main.rs @@ -0,0 +1,7 @@ +use criterion::criterion_main; + +mod channel_manager; + +criterion_main! { + channel_manager::benches +} diff --git a/src/common/grpc/benches/channel_manager.rs b/src/common/grpc/benches/channel_manager.rs new file mode 100644 index 000000000000..c9aa17bd8661 --- /dev/null +++ b/src/common/grpc/benches/channel_manager.rs @@ -0,0 +1,34 @@ +use common_grpc::channel_manager::ChannelManager; +use criterion::{criterion_group, criterion_main, Criterion}; + +#[tokio::main] +async fn do_bench_channel_manager() { + let m = ChannelManager::new(); + let task_count = 8; + let mut joins = Vec::with_capacity(task_count); + + for _ in 0..task_count { + let m_clone = m.clone(); + let join = tokio::spawn(async move { + for _ in 0..10000 { + let idx = rand::random::<usize>() % 100; + let ret = m_clone.get(format!("{}", idx)); + assert!(ret.is_ok()); + } + }); + joins.push(join); + } + + for join in joins { + let _ = join.await; + } +} + +fn bench_channel_manager(c: &mut Criterion) { + c.bench_function("bench channel manager", |b| { + b.iter(do_bench_channel_manager); + }); +} + +criterion_group!(benches, bench_channel_manager); +criterion_main!(benches); diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs index 0e31ebcdfad3..a209bb7b109d 100644 --- a/src/common/grpc/src/channel_manager.rs +++ b/src/common/grpc/src/channel_manager.rs @@ -1,8 +1,10 @@ -use std::collections::HashMap; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; use std::sync::Arc; -use std::sync::Mutex; use std::time::Duration; +use dashmap::mapref::entry::Entry; +use dashmap::DashMap; use snafu::ResultExt; use tonic::transport::Channel as InnerChannel; use tonic::transport::Endpoint; @@ -17,7 +19,7 @@ const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60; #[derive(Clone, Debug)] pub struct ChannelManager { config: ChannelConfig, - pool: Arc<Mutex<Pool>>, + pool: Arc<Pool>, } impl Default for ChannelManager { @@ -32,17 +34,14 @@ impl ChannelManager { } pub fn with_config(config: ChannelConfig) -> Self { - let pool = Pool { - channels: HashMap::default(), - }; - let pool = Arc::new(Mutex::new(pool)); + let pool = Arc::new(Pool::default()); let cloned_pool = pool.clone(); - common_runtime::spawn_bg(async move { + common_runtime::spawn_bg(async { recycle_channel_in_loop(cloned_pool, RECYCLE_CHANNEL_INTERVAL_SECS).await; }); - Self { pool, config } + Self { config, pool } } pub fn config(&self) -> &ChannelConfig { @@ -51,23 +50,30 @@ impl ChannelManager { pub fn get(&self, addr: impl AsRef<str>) -> Result<InnerChannel> { let addr = addr.as_ref(); - let mut pool = self.pool.lock().unwrap(); - if let Some(ch) = pool.get_mut(addr) { - ch.access += 1; - return Ok(ch.channel.clone()); + // It will acquire the read lock. 
+ if let Some(inner_ch) = self.pool.get(addr) { + return Ok(inner_ch); } - let endpoint = self.build_endpoint(addr)?; - - let inner_channel = endpoint.connect_lazy(); - let channel = Channel { - channel: inner_channel.clone(), - access: 1, - use_default_connector: true, + // It will acquire the write lock. + let entry = match self.pool.entry(addr.to_string()) { + Entry::Occupied(entry) => { + entry.get().increase_access(); + entry.into_ref() + } + Entry::Vacant(entry) => { + let endpoint = self.build_endpoint(addr)?; + let inner_channel = endpoint.connect_lazy(); + + let channel = Channel { + channel: inner_channel, + access: AtomicUsize::new(1), + use_default_connector: true, + }; + entry.insert(channel) + } }; - pool.put(addr, channel); - - Ok(inner_channel) + Ok(entry.channel.clone()) } pub fn reset_with_connector<C>( @@ -86,11 +92,10 @@ impl ChannelManager { let inner_channel = endpoint.connect_with_connector_lazy(connector); let channel = Channel { channel: inner_channel.clone(), - access: 1, + access: AtomicUsize::new(1), use_default_connector: false, }; - let mut pool = self.pool.lock().unwrap(); - pool.put(addr, channel); + self.pool.put(addr, channel); Ok(inner_channel) } @@ -99,8 +104,7 @@ impl ChannelManager { where F: FnMut(&String, &mut Channel) -> bool, { - let mut pool = self.pool.lock().unwrap(); - pool.retain_channel(f); + self.pool.retain_channel(f); } fn build_endpoint(&self, addr: &str) -> Result<Endpoint> { @@ -297,39 +301,56 @@ impl ChannelConfig { #[derive(Debug)] pub struct Channel { channel: InnerChannel, - access: usize, + access: AtomicUsize, use_default_connector: bool, } impl Channel { #[inline] pub fn access(&self) -> usize { - self.access + self.access.load(Ordering::Relaxed) } #[inline] pub fn use_default_connector(&self) -> bool { self.use_default_connector } + + #[inline] + pub fn increase_access(&self) { + self.access.fetch_add(1, Ordering::Relaxed); + } } -#[derive(Debug)] + +#[derive(Debug, Default)] struct Pool { - channels: HashMap<String, Channel>, + channels: DashMap<String, Channel>, } impl Pool { - #[inline] - fn get_mut(&mut self, addr: &str) -> Option<&mut Channel> { - self.channels.get_mut(addr) + fn get(&self, addr: &str) -> Option<InnerChannel> { + let channel = self.channels.get(addr); + channel.map(|ch| { + ch.increase_access(); + ch.channel.clone() + }) } - #[inline] - fn put(&mut self, addr: &str, channel: Channel) { + fn entry(&self, addr: String) -> Entry<String, Channel> { + self.channels.entry(addr) + } + + #[cfg(test)] + fn get_access(&self, addr: &str) -> Option<usize> { + let channel = self.channels.get(addr); + channel.map(|ch| ch.access()) + } + + fn put(&self, addr: &str, channel: Channel) { self.channels.insert(addr.to_string(), channel); } - #[inline] - fn retain_channel<F>(&mut self, f: F) + fn retain_channel<F>(&self, f: F) where F: FnMut(&String, &mut Channel) -> bool, { @@ -337,20 +358,12 @@ impl Pool { } } -async fn recycle_channel_in_loop(pool: Arc<Mutex<Pool>>, interval_secs: u64) { +async fn recycle_channel_in_loop(pool: Arc<Pool>, interval_secs: u64) { let mut interval = tokio::time::interval(Duration::from_secs(interval_secs)); loop { interval.tick().await; - let mut pool = pool.lock().unwrap(); - pool.retain_channel(|_, c| { - if c.access == 0 { - false - } else { - c.access = 0; - true - } - }) + pool.retain_channel(|_, c| c.access.swap(0, Ordering::Relaxed) != 0) } } @@ -363,10 +376,7 @@ mod tests { #[should_panic] #[test] fn test_invalid_addr() { - let pool = Pool { - channels: HashMap::default(), - }; - let pool 
= Arc::new(Mutex::new(pool)); + let pool = Arc::new(Pool::default()); let mgr = ChannelManager { pool, ..Default::default() @@ -378,36 +388,31 @@ mod tests { #[tokio::test] async fn test_access_count() { - let pool = Pool { - channels: HashMap::default(), - }; - let pool = Arc::new(Mutex::new(pool)); + let pool = Arc::new(Pool::default()); let config = ChannelConfig::new(); - let mgr = ChannelManager { pool, config }; + let mgr = Arc::new(ChannelManager { pool, config }); let addr = "test_uri"; - for i in 0..10 { - { - let _ = mgr.get(addr).unwrap(); - let mut pool = mgr.pool.lock().unwrap(); - assert_eq!(i + 1, pool.get_mut(addr).unwrap().access); - } + let mut joins = Vec::with_capacity(10); + for _ in 0..10 { + let mgr_clone = mgr.clone(); + let join = tokio::spawn(async move { + for _ in 0..100 { + let _ = mgr_clone.get(addr); + } + }); + joins.push(join); + } + for join in joins { + join.await.unwrap(); } - let mut pool = mgr.pool.lock().unwrap(); - - assert_eq!(10, pool.get_mut(addr).unwrap().access); + assert_eq!(1000, mgr.pool.get_access(addr).unwrap()); - pool.retain_channel(|_, c| { - if c.access == 0 { - false - } else { - c.access = 0; - true - } - }); + mgr.pool + .retain_channel(|_, c| c.access.swap(0, Ordering::Relaxed) != 0); - assert_eq!(0, pool.get_mut(addr).unwrap().access); + assert_eq!(0, mgr.pool.get_access(addr).unwrap()); } #[test] @@ -466,10 +471,7 @@ mod tests { #[test] fn test_build_endpoint() { - let pool = Pool { - channels: HashMap::default(), - }; - let pool = Arc::new(Mutex::new(pool)); + let pool = Arc::new(Pool::default()); let config = ChannelConfig::new() .timeout(Duration::from_secs(3)) .connect_timeout(Duration::from_secs(5)) @@ -493,9 +495,11 @@ mod tests { #[tokio::test] async fn test_channel_with_connector() { let pool = Pool { - channels: HashMap::default(), + channels: DashMap::default(), }; - let pool = Arc::new(Mutex::new(pool)); + + let pool = Arc::new(pool); + let config = ChannelConfig::new(); let mgr = ChannelManager { pool, config };
refactor
optimize channel_manager (#401)
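The commit above replaces a Mutex<HashMap> channel pool with a DashMap keyed by address plus an AtomicUsize access counter that a background task periodically swaps back to zero. Below is a minimal, self-contained Rust sketch of that pattern, not the crate's real ChannelManager API: the String value stands in for tonic's lazily connected channel, and the Entry/Pool names are illustrative assumptions.

use std::sync::atomic::{AtomicUsize, Ordering};

use dashmap::DashMap;

// A pooled entry: the cached value plus an access counter; the recycler
// swaps the counter back to zero to detect entries that went idle.
struct Entry {
    value: String, // stands in for tonic's lazily connected Channel
    access: AtomicUsize,
}

#[derive(Default)]
struct Pool {
    entries: DashMap<String, Entry>,
}

impl Pool {
    // Fast path: a concurrent read that only bumps the counter.
    fn get(&self, addr: &str) -> Option<String> {
        self.entries.get(addr).map(|e| {
            e.access.fetch_add(1, Ordering::Relaxed);
            e.value.clone()
        })
    }

    // Miss path: insert without ever taking a process-wide lock.
    fn get_or_insert(&self, addr: &str) -> String {
        if let Some(v) = self.get(addr) {
            return v;
        }
        let entry = self
            .entries
            .entry(addr.to_string())
            .or_insert_with(|| Entry {
                value: format!("channel-to-{addr}"),
                access: AtomicUsize::new(0),
            });
        entry.access.fetch_add(1, Ordering::Relaxed);
        entry.value.clone()
    }

    // Recycle pass: drop entries that saw no access since the last pass.
    fn recycle(&self) {
        self.entries
            .retain(|_, e| e.access.swap(0, Ordering::Relaxed) != 0);
    }
}

fn main() {
    let pool = Pool::default();
    let ch = pool.get_or_insert("127.0.0.1:4001");
    assert_eq!(ch, "channel-to-127.0.0.1:4001");
    pool.recycle(); // resets the access counter to zero
    pool.recycle(); // no access since the last pass, entry is evicted
    assert!(pool.get("127.0.0.1:4001").is_none());
}

The design choice mirrors the diff: reads stay on DashMap's sharded read path, and the periodic retain with swap(0) doubles as both usage tracking and eviction.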
62f660e439df766c8f209472c5cec0b59d52f301
2023-06-25 11:36:50
Ruihang Xia
feat: implement metrics for Scan plan (#1812)
false
diff --git a/src/common/query/src/physical_plan.rs b/src/common/query/src/physical_plan.rs index 144e1bcd0b6c..1c148020ac9b 100644 --- a/src/common/query/src/physical_plan.rs +++ b/src/common/query/src/physical_plan.rs @@ -22,6 +22,7 @@ use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef; use datafusion::error::Result as DfResult; pub use datafusion::execution::context::{SessionContext, TaskContext}; use datafusion::physical_plan::expressions::PhysicalSortExpr; +use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet}; pub use datafusion::physical_plan::Partitioning; use datafusion::physical_plan::Statistics; use datatypes::schema::SchemaRef; @@ -69,6 +70,10 @@ pub trait PhysicalPlan: Debug + Send + Sync { partition: usize, context: Arc<TaskContext>, ) -> Result<SendableRecordBatchStream>; + + fn metrics(&self) -> Option<MetricsSet> { + None + } } /// Adapt DataFusion's [`ExecutionPlan`](DfPhysicalPlan) to GreptimeDB's [`PhysicalPlan`]. @@ -76,11 +81,16 @@ pub trait PhysicalPlan: Debug + Send + Sync { pub struct PhysicalPlanAdapter { schema: SchemaRef, df_plan: Arc<dyn DfPhysicalPlan>, + metric: ExecutionPlanMetricsSet, } impl PhysicalPlanAdapter { pub fn new(schema: SchemaRef, df_plan: Arc<dyn DfPhysicalPlan>) -> Self { - Self { schema, df_plan } + Self { + schema, + df_plan, + metric: ExecutionPlanMetricsSet::new(), + } } pub fn df_plan(&self) -> Arc<dyn DfPhysicalPlan> { @@ -127,15 +137,21 @@ impl PhysicalPlan for PhysicalPlanAdapter { partition: usize, context: Arc<TaskContext>, ) -> Result<SendableRecordBatchStream> { + let baseline_metric = BaselineMetrics::new(&self.metric, partition); + let df_plan = self.df_plan.clone(); let stream = df_plan .execute(partition, context) .context(error::GeneralDataFusionSnafu)?; - let adapter = RecordBatchStreamAdapter::try_new(stream) + let adapter = RecordBatchStreamAdapter::try_new_with_metrics(stream, baseline_metric) .context(error::ConvertDfRecordBatchStreamSnafu)?; Ok(Box::pin(adapter)) } + + fn metrics(&self) -> Option<MetricsSet> { + Some(self.metric.clone_inner()) + } } #[derive(Debug)] @@ -196,6 +212,10 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter { fn statistics(&self) -> Statistics { Statistics::default() } + + fn metrics(&self) -> Option<MetricsSet> { + self.0.metrics() + } } #[cfg(test)] diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs index 260b52fe417c..24d5c2af4a0a 100644 --- a/src/common/recordbatch/src/adapter.rs +++ b/src/common/recordbatch/src/adapter.rs @@ -20,6 +20,7 @@ use std::task::{Context, Poll}; use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef; use datafusion::error::Result as DfResult; use datafusion::parquet::arrow::async_reader::{AsyncFileReader, ParquetRecordBatchStream}; +use datafusion::physical_plan::metrics::BaselineMetrics; use datafusion::physical_plan::RecordBatchStream as DfRecordBatchStream; use datafusion_common::DataFusionError; use datatypes::schema::{Schema, SchemaRef}; @@ -115,13 +116,31 @@ impl Stream for DfRecordBatchStreamAdapter { pub struct RecordBatchStreamAdapter { schema: SchemaRef, stream: DfSendableRecordBatchStream, + metrics: Option<BaselineMetrics>, } impl RecordBatchStreamAdapter { pub fn try_new(stream: DfSendableRecordBatchStream) -> Result<Self> { let schema = Arc::new(Schema::try_from(stream.schema()).context(error::SchemaConversionSnafu)?); - Ok(Self { schema, stream }) + Ok(Self { + schema, + stream, + metrics: None, + }) + } + + pub fn try_new_with_metrics( + stream: 
DfSendableRecordBatchStream, + metrics: BaselineMetrics, + ) -> Result<Self> { + let schema = + Arc::new(Schema::try_from(stream.schema()).context(error::SchemaConversionSnafu)?); + Ok(Self { + schema, + stream, + metrics: Some(metrics), + }) } } @@ -135,6 +154,12 @@ impl Stream for RecordBatchStreamAdapter { type Item = Result<RecordBatch>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + let timer = self + .metrics + .as_ref() + .map(|m| m.elapsed_compute().clone()) + .unwrap_or_default(); + let _guard = timer.timer(); match Pin::new(&mut self.stream).poll_next(cx) { Poll::Pending => Poll::Pending, Poll::Ready(Some(df_record_batch)) => { diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs index c510dbdde8b7..cdc2d17b7967 100644 --- a/src/table/src/table/scan.rs +++ b/src/table/src/table/scan.rs @@ -14,15 +14,20 @@ use std::any::Any; use std::fmt::{Debug, Formatter}; +use std::pin::Pin; use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll}; use common_query::error as query_error; use common_query::error::Result as QueryResult; use common_query::physical_plan::{Partitioning, PhysicalPlan, PhysicalPlanRef}; -use common_recordbatch::SendableRecordBatchStream; +use common_recordbatch::error::Result as RecordBatchResult; +use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream}; use datafusion::execution::context::TaskContext; +use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet}; use datafusion_physical_expr::PhysicalSortExpr; use datatypes::schema::SchemaRef; +use futures::{Stream, StreamExt}; use snafu::OptionExt; /// Adapt greptime's [SendableRecordBatchStream] to DataFusion's [PhysicalPlan]. @@ -30,6 +35,7 @@ pub struct StreamScanAdapter { stream: Mutex<Option<SendableRecordBatchStream>>, schema: SchemaRef, output_ordering: Option<Vec<PhysicalSortExpr>>, + metric: ExecutionPlanMetricsSet, } impl Debug for StreamScanAdapter { @@ -49,6 +55,7 @@ impl StreamScanAdapter { stream: Mutex::new(Some(stream)), schema, output_ordering: None, + metric: ExecutionPlanMetricsSet::new(), } } @@ -85,11 +92,46 @@ impl PhysicalPlan for StreamScanAdapter { fn execute( &self, - _partition: usize, + partition: usize, _context: Arc<TaskContext>, ) -> QueryResult<SendableRecordBatchStream> { let mut stream = self.stream.lock().unwrap(); - stream.take().context(query_error::ExecuteRepeatedlySnafu) + let stream = stream.take().context(query_error::ExecuteRepeatedlySnafu)?; + let baseline_metric = BaselineMetrics::new(&self.metric, partition); + Ok(Box::pin(StreamWithMetricWrapper { + stream, + metric: baseline_metric, + })) + } + + fn metrics(&self) -> Option<MetricsSet> { + Some(self.metric.clone_inner()) + } +} + +pub struct StreamWithMetricWrapper { + stream: SendableRecordBatchStream, + metric: BaselineMetrics, +} + +impl Stream for StreamWithMetricWrapper { + type Item = RecordBatchResult<RecordBatch>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + let this = self.get_mut(); + let _timer = this.metric.elapsed_compute().timer(); + let poll = this.stream.poll_next_unpin(cx); + if let Poll::Ready(Option::Some(Result::Ok(record_batch))) = &poll { + this.metric.record_output(record_batch.num_rows()); + } + + poll + } +} + +impl RecordBatchStream for StreamWithMetricWrapper { + fn schema(&self) -> SchemaRef { + self.stream.schema() } }
feat
implement metrics for Scan plan (#1812)
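The metrics commit above works by wrapping the scan's record batch stream so that poll_next both times the work and counts output rows. The standalone sketch below shows the same wrapping idea under simplifying assumptions: plain Instant/atomic counters instead of DataFusion's BaselineMetrics, and Vec<u64> standing in for a RecordBatch, so all names here are illustrative rather than GreptimeDB's real types.

use std::pin::Pin;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Instant;

use futures::stream::{self, Stream, StreamExt};

// Shared counters standing in for DataFusion's BaselineMetrics in this sketch.
#[derive(Default)]
struct ScanMetrics {
    poll_nanos: AtomicU64,
    output_rows: AtomicUsize,
}

// Wraps an inner stream and records the time spent inside poll_next plus the
// number of rows produced by every ready batch.
struct MeteredStream<S> {
    inner: S,
    metrics: Arc<ScanMetrics>,
}

impl<S> Stream for MeteredStream<S>
where
    S: Stream<Item = Vec<u64>> + Unpin, // Vec<u64> stands in for a RecordBatch
{
    type Item = Vec<u64>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        let start = Instant::now();
        let poll = this.inner.poll_next_unpin(cx);
        if let Poll::Ready(Some(batch)) = &poll {
            this.metrics
                .output_rows
                .fetch_add(batch.len(), Ordering::Relaxed);
        }
        this.metrics
            .poll_nanos
            .fetch_add(start.elapsed().as_nanos() as u64, Ordering::Relaxed);
        poll
    }
}

fn main() {
    futures::executor::block_on(async {
        let metrics = Arc::new(ScanMetrics::default());
        let inner = stream::iter(vec![vec![1u64, 2, 3], vec![4, 5]]);
        let mut metered = MeteredStream {
            inner,
            metrics: Arc::clone(&metrics),
        };
        while metered.next().await.is_some() {}
        assert_eq!(metrics.output_rows.load(Ordering::Relaxed), 5);
        println!(
            "time in poll_next: {} ns",
            metrics.poll_nanos.load(Ordering::Relaxed)
        );
    });
}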
1fc168bf6aee8e9a7c0c47c0461fe27745d24349
2024-01-09 15:08:48
Ning Sun
feat: update our cross schema check to cross catalog (#3123)
false
diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs index 96dd709ba5ac..d5d0c282e6e5 100644 --- a/src/catalog/src/table_source.rs +++ b/src/catalog/src/table_source.rs @@ -15,7 +15,6 @@ use std::collections::HashMap; use std::sync::Arc; -use common_catalog::consts::INFORMATION_SCHEMA_NAME; use common_catalog::format_full_table_name; use datafusion::common::{ResolvedTableReference, TableReference}; use datafusion::datasource::provider_as_source; @@ -30,7 +29,7 @@ use crate::CatalogManagerRef; pub struct DfTableSourceProvider { catalog_manager: CatalogManagerRef, resolved_tables: HashMap<String, Arc<dyn TableSource>>, - disallow_cross_schema_query: bool, + disallow_cross_catalog_query: bool, default_catalog: String, default_schema: String, } @@ -38,12 +37,12 @@ pub struct DfTableSourceProvider { impl DfTableSourceProvider { pub fn new( catalog_manager: CatalogManagerRef, - disallow_cross_schema_query: bool, + disallow_cross_catalog_query: bool, query_ctx: &QueryContext, ) -> Self { Self { catalog_manager, - disallow_cross_schema_query, + disallow_cross_catalog_query, resolved_tables: HashMap::new(), default_catalog: query_ctx.current_catalog().to_owned(), default_schema: query_ctx.current_schema().to_owned(), @@ -54,29 +53,18 @@ impl DfTableSourceProvider { &'a self, table_ref: TableReference<'a>, ) -> Result<ResolvedTableReference<'a>> { - if self.disallow_cross_schema_query { + if self.disallow_cross_catalog_query { match &table_ref { TableReference::Bare { .. } => (), - TableReference::Partial { schema, .. } => { - ensure!( - schema.as_ref() == self.default_schema - || schema.as_ref() == INFORMATION_SCHEMA_NAME, - QueryAccessDeniedSnafu { - catalog: &self.default_catalog, - schema: schema.as_ref(), - } - ); - } + TableReference::Partial { .. } => {} TableReference::Full { catalog, schema, .. 
} => { ensure!( - catalog.as_ref() == self.default_catalog - && (schema.as_ref() == self.default_schema - || schema.as_ref() == INFORMATION_SCHEMA_NAME), + catalog.as_ref() == self.default_catalog, QueryAccessDeniedSnafu { catalog: catalog.as_ref(), - schema: schema.as_ref() + schema: schema.as_ref(), } ); } @@ -136,21 +124,21 @@ mod tests { table: Cow::Borrowed("table_name"), }; let result = table_provider.resolve_table_ref(table_ref); - let _ = result.unwrap(); + assert!(result.is_ok()); let table_ref = TableReference::Partial { schema: Cow::Borrowed("public"), table: Cow::Borrowed("table_name"), }; let result = table_provider.resolve_table_ref(table_ref); - let _ = result.unwrap(); + assert!(result.is_ok()); let table_ref = TableReference::Partial { schema: Cow::Borrowed("wrong_schema"), table: Cow::Borrowed("table_name"), }; let result = table_provider.resolve_table_ref(table_ref); - assert!(result.is_err()); + assert!(result.is_ok()); let table_ref = TableReference::Full { catalog: Cow::Borrowed("greptime"), @@ -158,7 +146,7 @@ mod tests { table: Cow::Borrowed("table_name"), }; let result = table_provider.resolve_table_ref(table_ref); - let _ = result.unwrap(); + assert!(result.is_ok()); let table_ref = TableReference::Full { catalog: Cow::Borrowed("wrong_catalog"), @@ -172,14 +160,15 @@ mod tests { schema: Cow::Borrowed("information_schema"), table: Cow::Borrowed("columns"), }; - let _ = table_provider.resolve_table_ref(table_ref).unwrap(); + let result = table_provider.resolve_table_ref(table_ref); + assert!(result.is_ok()); let table_ref = TableReference::Full { catalog: Cow::Borrowed("greptime"), schema: Cow::Borrowed("information_schema"), table: Cow::Borrowed("columns"), }; - let _ = table_provider.resolve_table_ref(table_ref).unwrap(); + assert!(table_provider.resolve_table_ref(table_ref).is_ok()); let table_ref = TableReference::Full { catalog: Cow::Borrowed("dummy"), @@ -187,5 +176,12 @@ mod tests { table: Cow::Borrowed("columns"), }; assert!(table_provider.resolve_table_ref(table_ref).is_err()); + + let table_ref = TableReference::Full { + catalog: Cow::Borrowed("greptime"), + schema: Cow::Borrowed("greptime_private"), + table: Cow::Borrowed("columns"), + }; + assert!(table_provider.resolve_table_ref(table_ref).is_ok()); } } diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs index ff76fa11c3ca..f0ee10046ceb 100644 --- a/src/frontend/src/instance.rs +++ b/src/frontend/src/instance.rs @@ -442,7 +442,7 @@ pub fn check_permission( ) -> Result<()> { let need_validate = plugins .get::<QueryOptions>() - .map(|opts| opts.disallow_cross_schema_query) + .map(|opts| opts.disallow_cross_catalog_query) .unwrap_or_default(); if !need_validate { @@ -520,7 +520,7 @@ mod tests { let query_ctx = QueryContext::arc(); let plugins: Plugins = Plugins::new(); plugins.insert(QueryOptions { - disallow_cross_schema_query: true, + disallow_cross_catalog_query: true, }); let sql = r#" @@ -556,8 +556,6 @@ mod tests { } let wrong = vec![ - ("", "wrongschema."), - ("greptime.", "wrongschema."), ("wrongcatalog.", "public."), ("wrongcatalog.", "wrongschema."), ]; @@ -607,10 +605,10 @@ mod tests { let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap(); check_permission(plugins.clone(), &stmt[0], &query_ctx).unwrap(); - let sql = "SHOW TABLES FROM wrongschema"; + let sql = "SHOW TABLES FROM private"; let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap(); let re = check_permission(plugins.clone(), &stmt[0], &query_ctx); - assert!(re.is_err()); + assert!(re.is_ok()); // test 
describe table let sql = "DESC TABLE {catalog}{schema}demo;"; diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs index dfa185543c0e..983c4155f7e1 100644 --- a/src/query/src/datafusion/planner.rs +++ b/src/query/src/datafusion/planner.rs @@ -56,7 +56,7 @@ impl DfContextProviderAdapter { let mut table_provider = DfTableSourceProvider::new( engine_state.catalog_manager().clone(), - engine_state.disallow_cross_schema_query(), + engine_state.disallow_cross_catalog_query(), query_ctx.as_ref(), ); diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs index 9e5c1a3f9dab..97e8d3d931ee 100644 --- a/src/query/src/planner.rs +++ b/src/query/src/planner.rs @@ -58,7 +58,7 @@ impl DfLogicalPlanner { let table_provider = DfTableSourceProvider::new( self.engine_state.catalog_manager().clone(), - self.engine_state.disallow_cross_schema_query(), + self.engine_state.disallow_cross_catalog_query(), query_ctx.as_ref(), ); @@ -91,7 +91,7 @@ impl DfLogicalPlanner { async fn plan_pql(&self, stmt: EvalStmt, query_ctx: QueryContextRef) -> Result<LogicalPlan> { let table_provider = DfTableSourceProvider::new( self.engine_state.catalog_manager().clone(), - self.engine_state.disallow_cross_schema_query(), + self.engine_state.disallow_cross_catalog_query(), query_ctx.as_ref(), ); PromPlanner::stmt_to_plan(table_provider, stmt) diff --git a/src/query/src/query_engine/options.rs b/src/query/src/query_engine/options.rs index 3cb5043fa323..867e8c15fef0 100644 --- a/src/query/src/query_engine/options.rs +++ b/src/query/src/query_engine/options.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use common_catalog::consts::INFORMATION_SCHEMA_NAME; use session::context::QueryContextRef; use snafu::ensure; @@ -20,7 +19,7 @@ use crate::error::{QueryAccessDeniedSnafu, Result}; #[derive(Default, Clone)] pub struct QueryOptions { - pub disallow_cross_schema_query: bool, + pub disallow_cross_catalog_query: bool, } // TODO(shuiyisong): remove one method after #559 is done @@ -29,13 +28,8 @@ pub fn validate_catalog_and_schema( schema: &str, query_ctx: &QueryContextRef, ) -> Result<()> { - // information_schema is an exception - if schema.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) { - return Ok(()); - } - ensure!( - catalog == query_ctx.current_catalog() && schema == query_ctx.current_schema(), + catalog == query_ctx.current_catalog(), QueryAccessDeniedSnafu { catalog: catalog.to_string(), schema: schema.to_string(), @@ -57,8 +51,8 @@ mod tests { let context = QueryContext::with("greptime", "public"); validate_catalog_and_schema("greptime", "public", &context).unwrap(); - let re = validate_catalog_and_schema("greptime", "wrong_schema", &context); - assert!(re.is_err()); + let re = validate_catalog_and_schema("greptime", "private_schema", &context); + assert!(re.is_ok()); let re = validate_catalog_and_schema("wrong_catalog", "public", &context); assert!(re.is_err()); let re = validate_catalog_and_schema("wrong_catalog", "wrong_schema", &context); diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index c80ecadc9fb8..4da21338df5c 100644 --- a/src/query/src/query_engine/state.rs +++ b/src/query/src/query_engine/state.rs @@ -163,9 +163,9 @@ impl QueryEngineState { self.table_mutation_handler.as_ref() } - pub(crate) fn disallow_cross_schema_query(&self) -> bool { + pub(crate) fn disallow_cross_catalog_query(&self) -> bool { self.plugins - .map::<QueryOptions, _, _>(|x| 
x.disallow_cross_schema_query) + .map::<QueryOptions, _, _>(|x| x.disallow_cross_catalog_query) .unwrap_or(false) } diff --git a/src/query/src/tests/query_engine_test.rs b/src/query/src/tests/query_engine_test.rs index 437c15067439..aa2824236287 100644 --- a/src/query/src/tests/query_engine_test.rs +++ b/src/query/src/tests/query_engine_test.rs @@ -125,7 +125,7 @@ async fn test_query_validate() -> Result<()> { // set plugins let plugins = Plugins::new(); plugins.insert(QueryOptions { - disallow_cross_schema_query: true, + disallow_cross_catalog_query: true, }); let factory = QueryEngineFactory::new_with_plugins(catalog_list, None, None, false, plugins);
feat
update our cross schema check to cross catalog (#3123)
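The change above relaxes the permission rule so that only the catalog has to match the session, while any schema inside that catalog (information_schema, greptime_private, and so on) is allowed. A tiny sketch of that predicate follows, with illustrative names rather than GreptimeDB's real error types.

// Minimal sketch of the relaxed check: deny only cross-catalog access.
#[derive(Debug, PartialEq)]
enum Access {
    Allowed,
    Denied { catalog: String, schema: String },
}

fn validate_catalog(current_catalog: &str, catalog: &str, schema: &str) -> Access {
    if catalog == current_catalog {
        Access::Allowed
    } else {
        Access::Denied {
            catalog: catalog.to_string(),
            schema: schema.to_string(),
        }
    }
}

fn main() {
    // Same catalog, different schema: now allowed.
    assert_eq!(
        validate_catalog("greptime", "greptime", "information_schema"),
        Access::Allowed
    );
    assert_eq!(
        validate_catalog("greptime", "greptime", "private"),
        Access::Allowed
    );
    // Crossing into another catalog is still rejected.
    assert!(matches!(
        validate_catalog("greptime", "other_catalog", "public"),
        Access::Denied { .. }
    ));
}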
383c55d39c9221c7f73c74cde2c4ee307be95ac2
2022-05-25 08:24:43
evenyag
ci: Only trigger ci on pull request (#36)
false
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index 2c94d171895b..5d352f35f96d 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -1,4 +1,4 @@ -on: [push, pull_request] +on: [pull_request] name: Continuous integration for developing
ci
Only trigger ci on pull request (#36)
e6507aaf3416485963e7bc334621609ef246a6fc
2024-04-15 14:32:19
Jeremyhi
chore: debt 3696 (#3705)
false
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs index fed97c568193..ddb169ebc5d5 100644 --- a/src/meta-client/src/client.rs +++ b/src/meta-client/src/client.rs @@ -468,28 +468,24 @@ impl MetaClient { Ok(res) } - #[inline] pub fn heartbeat_client(&self) -> Result<HeartbeatClient> { self.heartbeat.clone().context(NotStartedSnafu { name: "heartbeat_client", }) } - #[inline] pub fn store_client(&self) -> Result<StoreClient> { self.store.clone().context(NotStartedSnafu { name: "store_client", }) } - #[inline] pub fn lock_client(&self) -> Result<LockClient> { self.lock.clone().context(NotStartedSnafu { name: "lock_client", }) } - #[inline] pub fn procedure_client(&self) -> Result<ProcedureClient> { self.procedure.clone().context(NotStartedSnafu { name: "procedure_client", @@ -502,12 +498,10 @@ impl MetaClient { }) } - #[inline] pub fn channel_config(&self) -> &ChannelConfig { self.channel_manager.config() } - #[inline] pub fn id(&self) -> Id { self.id } diff --git a/src/meta-srv/src/election/etcd.rs b/src/meta-srv/src/election/etcd.rs index 695808a78728..0b615c78e6e0 100644 --- a/src/meta-srv/src/election/etcd.rs +++ b/src/meta-srv/src/election/etcd.rs @@ -166,7 +166,7 @@ impl Election for EtcdElection { if let Some(res) = receiver.message().await.context(error::EtcdFailedSnafu)? { if res.ttl() <= 0 { - // Failed to keep alive, just break the loop. + warn!("Candidate lease expired, key: {}", self.candidate_key()); break; } }
chore
debt 3696 (#3705)
e4333969b48292e4b00f00f75a2657b92be0c98a
2024-03-13 19:41:47
Weny Xu
feat(fuzz): add alter table target (#3503)
false
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index 94ada0fabf73..1ce1d8c18cd2 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -123,7 +123,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - target: [ "fuzz_create_table" ] + target: [ "fuzz_create_table", "fuzz_alter_table" ] steps: - uses: actions/checkout@v4 - uses: arduino/setup-protoc@v3 diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml index a63dc60babcd..ce216de41970 100644 --- a/tests-fuzz/Cargo.toml +++ b/tests-fuzz/Cargo.toml @@ -55,3 +55,10 @@ path = "targets/fuzz_insert.rs" test = false bench = false doc = false + +[[bin]] +name = "fuzz_alter_table" +path = "targets/fuzz_alter_table.rs" +test = false +bench = false +doc = false diff --git a/tests-fuzz/src/context.rs b/tests-fuzz/src/context.rs index a9fbcbd3aaa9..29536c853ccd 100644 --- a/tests-fuzz/src/context.rs +++ b/tests-fuzz/src/context.rs @@ -14,13 +14,20 @@ use std::sync::Arc; +use common_query::AddColumnLocation; use partition::partition::PartitionDef; +use rand::Rng; +use snafu::{ensure, OptionExt}; -use crate::ir::{Column, CreateTableExpr, Ident}; +use crate::error::{self, Result}; +use crate::generator::Random; +use crate::ir::alter_expr::AlterTableOperation; +use crate::ir::{AlterTableExpr, Column, CreateTableExpr, Ident}; pub type TableContextRef = Arc<TableContext>; /// TableContext stores table info. +#[derive(Debug, Clone)] pub struct TableContext { pub name: Ident, pub columns: Vec<Column>, @@ -48,3 +55,183 @@ impl From<&CreateTableExpr> for TableContext { } } } + +impl TableContext { + /// Applies the [AlterTableExpr]. + pub fn alter(mut self, expr: AlterTableExpr) -> Result<TableContext> { + match expr.alter_options { + AlterTableOperation::AddColumn { column, location } => { + ensure!( + !self.columns.iter().any(|col| col.name == column.name), + error::UnexpectedSnafu { + violated: format!("Column {} exists", column.name), + } + ); + match location { + Some(AddColumnLocation::First) => { + let mut columns = Vec::with_capacity(self.columns.len() + 1); + columns.push(column); + columns.extend(self.columns); + self.columns = columns; + } + Some(AddColumnLocation::After { column_name }) => { + let index = self + .columns + .iter() + // TODO(weny): find a better way? 
+ .position(|col| col.name.to_string() == column_name) + .context(error::UnexpectedSnafu { + violated: format!("Column: {column_name} not found"), + })?; + self.columns.insert(index + 1, column); + } + None => self.columns.push(column), + } + // Re-generates the primary_keys + self.primary_keys = self + .columns + .iter() + .enumerate() + .flat_map(|(idx, col)| { + if col.is_primary_key() { + Some(idx) + } else { + None + } + }) + .collect(); + Ok(self) + } + AlterTableOperation::DropColumn { name } => { + self.columns.retain(|col| col.name != name); + // Re-generates the primary_keys + self.primary_keys = self + .columns + .iter() + .enumerate() + .flat_map(|(idx, col)| { + if col.is_primary_key() { + Some(idx) + } else { + None + } + }) + .collect(); + Ok(self) + } + AlterTableOperation::RenameTable { new_table_name } => { + ensure!( + new_table_name != self.name, + error::UnexpectedSnafu { + violated: "The new table name is equal the current name", + } + ); + self.name = new_table_name; + Ok(self) + } + } + } + + pub fn generate_unique_column_name<R: Rng>( + &self, + rng: &mut R, + generator: &dyn Random<Ident, R>, + ) -> Ident { + let mut name = generator.gen(rng); + while self.columns.iter().any(|col| col.name.value == name.value) { + name = generator.gen(rng); + } + name + } + + pub fn generate_unique_table_name<R: Rng>( + &self, + rng: &mut R, + generator: &dyn Random<Ident, R>, + ) -> Ident { + let mut name = generator.gen(rng); + while self.name.value == name.value { + name = generator.gen(rng); + } + name + } +} + +#[cfg(test)] +mod tests { + use common_query::AddColumnLocation; + use datatypes::data_type::ConcreteDataType; + + use super::TableContext; + use crate::ir::alter_expr::AlterTableOperation; + use crate::ir::create_expr::ColumnOption; + use crate::ir::{AlterTableExpr, Column, Ident}; + + #[test] + fn test_table_context_alter() { + let table_ctx = TableContext { + name: "foo".into(), + columns: vec![], + partition: None, + primary_keys: vec![], + }; + // Add a column + let expr = AlterTableExpr { + table_name: "foo".into(), + alter_options: AlterTableOperation::AddColumn { + column: Column { + name: "a".into(), + column_type: ConcreteDataType::timestamp_microsecond_datatype(), + options: vec![ColumnOption::PrimaryKey], + }, + location: None, + }, + }; + let table_ctx = table_ctx.alter(expr).unwrap(); + assert_eq!(table_ctx.columns[0].name, Ident::new("a")); + assert_eq!(table_ctx.primary_keys, vec![0]); + + // Add a column at first + let expr = AlterTableExpr { + table_name: "foo".into(), + alter_options: AlterTableOperation::AddColumn { + column: Column { + name: "b".into(), + column_type: ConcreteDataType::timestamp_microsecond_datatype(), + options: vec![ColumnOption::PrimaryKey], + }, + location: Some(AddColumnLocation::First), + }, + }; + let table_ctx = table_ctx.alter(expr).unwrap(); + assert_eq!(table_ctx.columns[0].name, Ident::new("b")); + assert_eq!(table_ctx.primary_keys, vec![0, 1]); + + // Add a column after "b" + let expr = AlterTableExpr { + table_name: "foo".into(), + alter_options: AlterTableOperation::AddColumn { + column: Column { + name: "c".into(), + column_type: ConcreteDataType::timestamp_microsecond_datatype(), + options: vec![ColumnOption::PrimaryKey], + }, + location: Some(AddColumnLocation::After { + column_name: "b".into(), + }), + }, + }; + let table_ctx = table_ctx.alter(expr).unwrap(); + assert_eq!(table_ctx.columns[1].name, Ident::new("c")); + assert_eq!(table_ctx.primary_keys, vec![0, 1, 2]); + + // Drop the column "b" + let expr = 
AlterTableExpr { + table_name: "foo".into(), + alter_options: AlterTableOperation::DropColumn { name: "b".into() }, + }; + let table_ctx = table_ctx.alter(expr).unwrap(); + assert_eq!(table_ctx.columns[1].name, Ident::new("a")); + assert_eq!(table_ctx.primary_keys, vec![0, 1]); + } +} diff --git a/tests-fuzz/src/generator/alter_expr.rs b/tests-fuzz/src/generator/alter_expr.rs index 03e823773d05..1a9d4b965b15 100644 --- a/tests-fuzz/src/generator/alter_expr.rs +++ b/tests-fuzz/src/generator/alter_expr.rs @@ -15,6 +15,7 @@ use std::marker::PhantomData; use common_query::AddColumnLocation; +use datatypes::data_type::ConcreteDataType; use derive_builder::Builder; use rand::Rng; use snafu::ensure; @@ -24,10 +25,38 @@ use crate::error::{self, Error, Result}; use crate::fake::WordGenerator; use crate::generator::{ColumnOptionGenerator, ConcreteDataTypeGenerator, Generator, Random}; use crate::ir::alter_expr::{AlterTableExpr, AlterTableOperation}; +use crate::ir::create_expr::ColumnOption; use crate::ir::{ - column_options_generator, droppable_columns, generate_columns, ColumnTypeGenerator, Ident, + droppable_columns, generate_columns, generate_random_value, ColumnTypeGenerator, Ident, }; +fn add_column_options_generator<R: Rng>( + rng: &mut R, + column_type: &ConcreteDataType, +) -> Vec<ColumnOption> { + // 0 -> NULL + // 1 -> DEFAULT VALUE + // 2 -> PRIMARY KEY + DEFAULT VALUE + let idx = rng.gen_range(0..3); + match idx { + 0 => vec![ColumnOption::Null], + 1 => { + vec![ColumnOption::DefaultValue(generate_random_value( + rng, + column_type, + None, + ))] + } + 2 => { + vec![ + ColumnOption::PrimaryKey, + ColumnOption::DefaultValue(generate_random_value(rng, column_type, None)), + ] + } + _ => unreachable!(), + } +} + /// Generates the [AlterTableOperation::AddColumn] of [AlterTableExpr]. 
#[derive(Builder)] #[builder(pattern = "owned")] @@ -37,7 +66,7 @@ pub struct AlterExprAddColumnGenerator<R: Rng + 'static> { location: bool, #[builder(default = "Box::new(WordGenerator)")] name_generator: Box<dyn Random<Ident, R>>, - #[builder(default = "Box::new(column_options_generator)")] + #[builder(default = "Box::new(add_column_options_generator)")] column_options_generator: ColumnOptionGenerator<R>, #[builder(default = "Box::new(ColumnTypeGenerator)")] column_type_generator: ConcreteDataTypeGenerator<R>, @@ -65,7 +94,9 @@ impl<R: Rng + 'static> Generator<AlterTableExpr, R> for AlterExprAddColumnGenera None }; - let name = self.name_generator.gen(rng); + let name = self + .table_ctx + .generate_unique_column_name(rng, self.name_generator.as_ref()); let column = generate_columns( rng, vec![name], @@ -116,7 +147,9 @@ impl<R: Rng> Generator<AlterTableExpr, R> for AlterExprRenameGenerator<R> { type Error = Error; fn generate(&self, rng: &mut R) -> Result<AlterTableExpr> { - let new_table_name = self.name_generator.gen(rng); + let new_table_name = self + .table_ctx + .generate_unique_table_name(rng, self.name_generator.as_ref()); Ok(AlterTableExpr { table_name: self.table_ctx.name.clone(), alter_options: AlterTableOperation::RenameTable { new_table_name }, @@ -153,7 +186,7 @@ mod tests { .generate(&mut rng) .unwrap(); let serialized = serde_json::to_string(&expr).unwrap(); - let expected = r#"{"table_name":{"value":"animI","quote_style":null},"alter_options":{"AddColumn":{"column":{"name":{"value":"velit","quote_style":null},"column_type":{"Int32":{}},"options":[{"DefaultValue":{"Int32":853246610}}]},"location":null}}}"#; + let expected = r#"{"table_name":{"value":"animI","quote_style":null},"alter_options":{"AddColumn":{"column":{"name":{"value":"velit","quote_style":null},"column_type":{"Int32":{}},"options":[{"DefaultValue":{"Int32":1606462472}}]},"location":null}}}"#; assert_eq!(expected, serialized); let expr = AlterExprRenameGeneratorBuilder::default() @@ -163,7 +196,7 @@ mod tests { .generate(&mut rng) .unwrap(); let serialized = serde_json::to_string(&expr).unwrap(); - let expected = r#"{"table_name":{"value":"animI","quote_style":null},"alter_options":{"RenameTable":{"new_table_name":{"value":"iure","quote_style":null}}}}"#; + let expected = r#"{"table_name":{"value":"animI","quote_style":null},"alter_options":{"RenameTable":{"new_table_name":{"value":"nihil","quote_style":null}}}}"#; assert_eq!(expected, serialized); let expr = AlterExprDropColumnGeneratorBuilder::default() @@ -173,7 +206,7 @@ mod tests { .generate(&mut rng) .unwrap(); let serialized = serde_json::to_string(&expr).unwrap(); - let expected = r#"{"table_name":{"value":"animI","quote_style":null},"alter_options":{"DropColumn":{"name":{"value":"toTAm","quote_style":null}}}}"#; + let expected = r#"{"table_name":{"value":"animI","quote_style":null},"alter_options":{"DropColumn":{"name":{"value":"cUmquE","quote_style":null}}}}"#; assert_eq!(expected, serialized); } } diff --git a/tests-fuzz/src/ir/create_expr.rs b/tests-fuzz/src/ir/create_expr.rs index 6ef151f82558..1e6c165b5c8a 100644 --- a/tests-fuzz/src/ir/create_expr.rs +++ b/tests-fuzz/src/ir/create_expr.rs @@ -22,7 +22,7 @@ use serde::{Deserialize, Serialize}; use crate::ir::{Column, Ident}; -// The column options +/// The column options #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub enum ColumnOption { Null, diff --git a/tests-fuzz/src/validator/column.rs b/tests-fuzz/src/validator/column.rs index 797834eec517..0736bbb48da4 100644 --- 
a/tests-fuzz/src/validator/column.rs +++ b/tests-fuzz/src/validator/column.rs @@ -37,6 +37,21 @@ fn is_nullable(str: &str) -> bool { str.to_uppercase() == "YES" } +enum SemanticType { + Timestamp, + Field, + Tag, +} + +fn semantic_type(str: &str) -> Option<SemanticType> { + match str { + "TIMESTAMP" => Some(SemanticType::Timestamp), + "FIELD" => Some(SemanticType::Field), + "TAG" => Some(SemanticType::Tag), + _ => None, + } +} + impl PartialEq<Column> for ColumnEntry { fn eq(&self, other: &Column) -> bool { // Checks `table_name` @@ -108,11 +123,47 @@ impl PartialEq<Column> for ColumnEntry { .iter() .any(|opt| matches!(opt, ColumnOption::NotNull | ColumnOption::TimeIndex)) { - debug!("unexpected ColumnOption::NotNull or ColumnOption::TimeIndex"); + debug!("ColumnOption::NotNull or ColumnOption::TimeIndex is not found"); return false; } } //TODO: Checks `semantic_type` + match semantic_type(&self.semantic_type) { + Some(SemanticType::Tag) => { + if !other + .options + .iter() + .any(|opt| matches!(opt, ColumnOption::PrimaryKey)) + { + debug!("ColumnOption::PrimaryKey is not found"); + return false; + } + } + Some(SemanticType::Field) => { + if other + .options + .iter() + .any(|opt| matches!(opt, ColumnOption::PrimaryKey | ColumnOption::TimeIndex)) + { + debug!("unexpected ColumnOption::PrimaryKey or ColumnOption::TimeIndex"); + return false; + } + } + Some(SemanticType::Timestamp) => { + if !other + .options + .iter() + .any(|opt| matches!(opt, ColumnOption::TimeIndex)) + { + debug!("ColumnOption::TimeIndex is not found"); + return false; + } + } + None => { + debug!("unknown semantic type: {}", self.semantic_type); + return false; + } + }; true } @@ -186,7 +237,7 @@ mod tests { table_name: String::new(), column_name: "test".to_string(), data_type: ConcreteDataType::int8_datatype().name(), - semantic_type: String::new(), + semantic_type: "FIELD".to_string(), column_default: None, is_nullable: "Yes".to_string(), }; @@ -210,7 +261,7 @@ mod tests { table_name: String::new(), column_name: "test".to_string(), data_type: ConcreteDataType::int8_datatype().to_string(), - semantic_type: String::new(), + semantic_type: "FIELD".to_string(), column_default: Some("1".to_string()), is_nullable: "Yes".to_string(), }; @@ -226,7 +277,7 @@ mod tests { table_name: String::new(), column_name: "test".to_string(), data_type: ConcreteDataType::int8_datatype().to_string(), - semantic_type: String::new(), + semantic_type: "FIELD".to_string(), column_default: Some("Hello()".to_string()), is_nullable: "Yes".to_string(), }; diff --git a/tests-fuzz/targets/fuzz_alter_table.rs b/tests-fuzz/targets/fuzz_alter_table.rs new file mode 100644 index 000000000000..3d345c2f16e7 --- /dev/null +++ b/tests-fuzz/targets/fuzz_alter_table.rs @@ -0,0 +1,185 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_main] + +use std::sync::Arc; + +use arbitrary::{Arbitrary, Unstructured}; +use common_telemetry::info; +use libfuzzer_sys::fuzz_target; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaChaRng; +use snafu::ResultExt; +use sqlx::{MySql, Pool}; +use tests_fuzz::context::{TableContext, TableContextRef}; +use tests_fuzz::error::{self, Result}; +use tests_fuzz::fake::{ + merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, + MappedGenerator, WordGenerator, +}; +use tests_fuzz::generator::alter_expr::{ + AlterExprAddColumnGeneratorBuilder, AlterExprDropColumnGeneratorBuilder, + AlterExprRenameGeneratorBuilder, +}; +use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; +use tests_fuzz::generator::Generator; +use tests_fuzz::ir::{droppable_columns, AlterTableExpr, CreateTableExpr}; +use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator; +use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; +use tests_fuzz::translator::DslTranslator; +use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::validator; + +struct FuzzContext { + greptime: Pool<MySql>, +} + +impl FuzzContext { + async fn close(self) { + self.greptime.close().await; + } +} + +#[derive(Clone, Debug)] +struct FuzzInput { + seed: u64, + actions: usize, +} + +fn generate_create_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> { + let columns = rng.gen_range(2..30); + let create_table_generator = CreateTableExprGeneratorBuilder::default() + .name_generator(Box::new(MappedGenerator::new( + WordGenerator, + merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map), + ))) + .columns(columns) + .engine("mito") + .build() + .unwrap(); + create_table_generator.generate(rng) +} + +fn generate_alter_table_expr<R: Rng + 'static>( + table_ctx: TableContextRef, + rng: &mut R, +) -> Result<AlterTableExpr> { + let rename = rng.gen_bool(0.2); + if rename { + let expr_generator = AlterExprRenameGeneratorBuilder::default() + .table_ctx(table_ctx) + .name_generator(Box::new(MappedGenerator::new( + WordGenerator, + merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map), + ))) + .build() + .unwrap(); + expr_generator.generate(rng) + } else { + let drop_column = rng.gen_bool(0.5) && !droppable_columns(&table_ctx.columns).is_empty(); + if drop_column { + let expr_generator = AlterExprDropColumnGeneratorBuilder::default() + .table_ctx(table_ctx) + .build() + .unwrap(); + expr_generator.generate(rng) + } else { + let location = rng.gen_bool(0.5); + let expr_generator = AlterExprAddColumnGeneratorBuilder::default() + .table_ctx(table_ctx) + .location(location) + .build() + .unwrap(); + expr_generator.generate(rng) + } + } +} + +impl Arbitrary<'_> for FuzzInput { + fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> { + let seed = u.int_in_range(u64::MIN..=u64::MAX)?; + let mut rng = ChaChaRng::seed_from_u64(seed); + let actions = rng.gen_range(1..256); + + Ok(FuzzInput { seed, actions }) + } +} + +async fn execute_alter_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> { + info!("input: {input:?}"); + let mut rng = ChaChaRng::seed_from_u64(input.seed); + + // Create table + let expr = generate_create_table_expr(&mut rng).unwrap(); + let translator = CreateTableExprTranslator; + let sql = translator.translate(&expr)?; + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql: &sql })?; + info!("Create 
table: {sql}, result: {result:?}"); + + // Alter table actions + let mut table_ctx = Arc::new(TableContext::from(&expr)); + for _ in 0..input.actions { + let expr = generate_alter_table_expr(table_ctx.clone(), &mut rng).unwrap(); + let translator = AlterTableExprTranslator; + let sql = translator.translate(&expr)?; + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql: &sql })?; + info!("Alter table: {sql}, result: {result:?}"); + // Applies changes + table_ctx = Arc::new(Arc::unwrap_or_clone(table_ctx).alter(expr).unwrap()); + + // Validates columns + let mut column_entries = validator::column::fetch_columns( + &ctx.greptime, + "public".into(), + table_ctx.name.clone(), + ) + .await?; + column_entries.sort_by(|a, b| a.column_name.cmp(&b.column_name)); + let mut columns = table_ctx.columns.clone(); + columns.sort_by(|a, b| a.name.value.cmp(&b.name.value)); + validator::column::assert_eq(&column_entries, &columns)?; + } + + // Cleans up + let table_name = table_ctx.name.clone(); + let sql = format!("DROP TABLE {}", table_name); + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql })?; + info!("Drop table: {}, result: {result:?}", table_name); + ctx.close().await; + + Ok(()) +} + +fuzz_target!(|input: FuzzInput| { + common_telemetry::init_default_ut_logging(); + common_runtime::block_on_write(async { + let Connections { mysql } = init_greptime_connections().await; + let ctx = FuzzContext { + greptime: mysql.expect("mysql connection init must be succeed"), + }; + execute_alter_table(ctx, input) + .await + .unwrap_or_else(|err| panic!("fuzz test must be succeed: {err:?}")); + }) +}); diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs index 7af489b1c2e3..6d351778dc96 100644 --- a/tests-fuzz/targets/fuzz_create_table.rs +++ b/tests-fuzz/targets/fuzz_create_table.rs @@ -33,7 +33,6 @@ use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::{init_greptime_connections, Connections}; use tests_fuzz::validator; -use tests_fuzz::validator::column::fetch_columns; struct FuzzContext { greptime: Pool<MySql>, @@ -85,9 +84,10 @@ async fn execute_create_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> .context(error::ExecuteQuerySnafu { sql: &sql })?; info!("Create table: {sql}, result: {result:?}"); - // Validate columns + // Validates columns let mut column_entries = - fetch_columns(&ctx.greptime, "public".into(), expr.table_name.clone()).await?; + validator::column::fetch_columns(&ctx.greptime, "public".into(), expr.table_name.clone()) + .await?; column_entries.sort_by(|a, b| a.column_name.cmp(&b.column_name)); let mut columns = expr.columns.clone(); columns.sort_by(|a, b| a.name.value.cmp(&b.name.value));
feat
add alter table target (#3503)
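The fuzz commit above replays ALTER TABLE operations onto an in-memory TableContext, including ADD COLUMN with FIRST/AFTER placement and a duplicate-name check. The sketch below models just that column-placement logic on a plain Vec<String>; the Location enum and add_column helper are illustrative assumptions, not the tests-fuzz crate's API.

// Minimal sketch of replaying "ADD COLUMN ... [FIRST | AFTER col]" onto an
// in-memory column list.
#[derive(Debug, Clone, PartialEq)]
enum Location {
    First,
    After(String),
}

fn add_column(
    columns: &mut Vec<String>,
    name: &str,
    location: Option<Location>,
) -> Result<(), String> {
    // Reject duplicates up front, mirroring the ensure! in the diff.
    if columns.iter().any(|c| c == name) {
        return Err(format!("column {name} already exists"));
    }
    match location {
        Some(Location::First) => columns.insert(0, name.to_string()),
        Some(Location::After(anchor)) => {
            let idx = columns
                .iter()
                .position(|c| *c == anchor)
                .ok_or_else(|| format!("column {anchor} not found"))?;
            columns.insert(idx + 1, name.to_string());
        }
        None => columns.push(name.to_string()),
    }
    Ok(())
}

fn main() {
    let mut columns = vec!["ts".to_string()];
    add_column(&mut columns, "a", None).unwrap();
    add_column(&mut columns, "b", Some(Location::First)).unwrap();
    add_column(&mut columns, "c", Some(Location::After("b".to_string()))).unwrap();
    assert_eq!(columns, ["b", "c", "ts", "a"]);
    // Adding an existing column name fails.
    assert!(add_column(&mut columns, "a", None).is_err());
}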
371d4cf9f51c6741c6fdd10dd424701c53cfc93a
2024-04-30 08:36:47
Ruihang Xia
fix: broken link in contributing guide (#3831)
false
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 94bfb60ac35c..97e88e843d00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process. - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root). -- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md). +- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md). - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`). - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
fix
broken link in contributing guide (#3831)
bf5975ca3e96944e1cbbb2ede4680ad312b13e50
2022-07-25 12:56:00
evenyag
feat: Prototype of the storage engine (#107)
false
diff --git a/.gitignore b/.gitignore index 877b107204e6..4e9d971ad199 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,9 @@ # These are backup files generated by rustfmt **/*.rs.bk +# Mac DS_Store +**/*.DS_Store + debug/ # MSVC Windows builds of rustc generate these, which store debugging information diff --git a/Cargo.lock b/Cargo.lock index bc5e2c84c622..23eb67428bc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,6 +142,17 @@ dependencies = [ "strength_reduce", ] +[[package]] +name = "async-channel" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + [[package]] name = "async-compat" version = "0.2.1" @@ -308,6 +319,18 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backon" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f334d8b7d003e7d4e17844b81ffbfcd24ad955777997440701c08a834e407105" +dependencies = [ + "futures", + "pin-project", + "rand 0.8.5", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.65" @@ -329,6 +352,12 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -446,6 +475,15 @@ name = "bytes" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +dependencies = [ + "serde", +] + +[[package]] +name = "cache-padded" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" [[package]] name = "cast" @@ -456,6 +494,12 @@ dependencies = [ "rustc_version", ] +[[package]] +name = "castaway" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" + [[package]] name = "cc" version = "1.0.73" @@ -671,7 +715,7 @@ version = "0.1.0" dependencies = [ "common-error", "common-telemetry", - "metrics", + "metrics 0.18.1", "once_cell", "paste", "snafu", @@ -686,7 +730,7 @@ dependencies = [ "backtrace", "common-error", "console-subscriber", - "metrics", + "metrics 0.18.1", "metrics-exporter-prometheus", "once_cell", "opentelemetry", @@ -705,6 +749,15 @@ dependencies = [ name = "common-time" version = "0.1.0" +[[package]] +name = "concurrent-queue" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +dependencies = [ + "cache-padded", +] + [[package]] name = "console-api" version = "0.2.0" @@ -949,6 +1002,37 @@ dependencies = [ "syn", ] +[[package]] +name = "curl" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d855aeef205b43f65a5001e0997d81f8efca7badad4fad7d897aa7f0d0651f" +dependencies = [ + "curl-sys", + "libc", + "openssl-probe", + "openssl-sys", + "schannel", + "socket2", + "winapi", +] + +[[package]] +name = "curl-sys" +version = "0.4.55+curl-7.83.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "23734ec77368ec583c2e61dd3f0b0e5c98b93abe6d2a004ca06b91dd7e3e2762" +dependencies = [ + "cc", + "libc", + "libnghttp2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", + "winapi", +] + [[package]] name = "datafusion" version = "7.0.0" @@ -1041,7 +1125,8 @@ dependencies = [ "common-telemetry", "datatypes", "hyper", - "metrics", + "log-store", + "metrics 0.18.1", "query", "serde", "serde_json", @@ -1051,6 +1136,7 @@ dependencies = [ "store-api", "table", "table-engine", + "tempdir", "tokio", "tokio-stream", "tonic", @@ -1146,6 +1232,12 @@ dependencies = [ "syn", ] +[[package]] +name = "event-listener" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" + [[package]] name = "fallible-streaming-iterator" version = "0.1.9" @@ -1264,6 +1356,21 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +[[package]] +name = "futures-lite" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.21" @@ -1594,6 +1701,33 @@ dependencies = [ "nom", ] +[[package]] +name = "isahc" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "334e04b4d781f436dc315cb1e7515bd96826426345d498149e4bde36b67f8ee9" +dependencies = [ + "async-channel", + "castaway", + "crossbeam-utils", + "curl", + "curl-sys", + "encoding_rs", + "event-listener", + "futures-lite", + "http", + "log", + "mime", + "once_cell", + "polling", + "slab", + "sluice", + "tracing", + "tracing-futures", + "url", + "waker-fn", +] + [[package]] name = "itertools" version = "0.10.3" @@ -1723,6 +1857,28 @@ version = "0.2.125" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +[[package]] +name = "libnghttp2-sys" +version = "0.1.7+1.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57ed28aba195b38d5ff02b9170cbff627e336a20925e43b4945390401c5dc93f" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "libz-sys" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "lock_api" version = "0.4.7" @@ -1870,6 +2026,16 @@ dependencies = [ "metrics-macros", ] +[[package]] +name = "metrics" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "142c53885123b68d94108295a09d4afe1a1388ed95b54d5dacd9a454753030f2" +dependencies = [ + "ahash", + "metrics-macros", +] + [[package]] name = "metrics-exporter-prometheus" version = "0.9.0" @@ -1877,7 +2043,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b93b470b04c005178058e18ac8bb2eb3fda562cf87af5ea05ba8d44190d458c" dependencies = [ "indexmap", - "metrics", + "metrics 0.18.1", "metrics-util", "parking_lot 0.11.2", "quanta", @@ -1905,7 +2071,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", 
"hashbrown 0.11.2", - "metrics", + "metrics 0.18.1", "num_cpus", "parking_lot 0.11.2", "quanta", @@ -2178,9 +2344,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opendal" -version = "0.6.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3649ace5a99d388ac9d02459135ad0425941e8cf6c33f418c4ded80483563ce3" +checksum = "e9e982034fd0b4f142efba461604f5ccb1fb1f962c4e84c8e44ce369f0e3d1f2" dependencies = [ "anyhow", "async-compat", @@ -2193,15 +2359,14 @@ dependencies = [ "hyper-tls", "log", "md5", - "metrics", + "metrics 0.19.0", "minitrace", "once_cell", "parking_lot 0.12.0", + "percent-encoding", "pin-project", "quick-xml", "reqsign", - "reqwest", - "roxmltree", "serde", "thiserror", "time 0.3.9", @@ -2323,6 +2488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96bcbab4bfea7a59c2c0fe47211a1ac4e3e96bea6eb446d704f310bc5c732ae2" dependencies = [ "num-traits", + "serde", ] [[package]] @@ -2341,6 +2507,12 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +[[package]] +name = "parking" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" + [[package]] name = "parking_lot" version = "0.11.2" @@ -2577,6 +2749,19 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +dependencies = [ + "cfg-if", + "libc", + "log", + "wepoll-ffi", + "winapi", +] + [[package]] name = "ppv-lite86" version = "0.2.16" @@ -2585,9 +2770,9 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "prettyplease" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3662417e650bd6af740f5b8b3501776aa10c3d5cbd10b40263ed250d3770884" +checksum = "da6ffbe862780245013cb1c0a48c4e44b7d665548088f91f6b90876d0625e4c2" dependencies = [ "proc-macro2", "syn", @@ -2714,7 +2899,7 @@ dependencies = [ "datatypes", "futures", "futures-util", - "metrics", + "metrics 0.18.1", "num", "num-traits", "rand 0.8.5", @@ -2727,9 +2912,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8533f14c8382aaad0d592c812ac3b826162128b65662331e1127b45c3d18536b" +checksum = "9279fbdacaad3baf559d8cabe0acc3d06e30ea14931af31af79578ac0946decc" dependencies = [ "memchr", "serde", @@ -2910,12 +3095,12 @@ dependencies = [ [[package]] name = "reqsign" -version = "0.0.3" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8931679eac96ffc8eee4e5507c4b91fbc0799f29a6535707ee3ef89c0d0de426" +checksum = "9a6b48d7d1f390bcb0149b4d7a3022f5a927fca173c19413ba17e74936716e39" dependencies = [ "anyhow", - "async-trait", + "backon", "base64", "bytes", "dirs", @@ -2923,18 +3108,17 @@ dependencies = [ "hex", "hmac", "http", + "isahc", "jsonwebtoken", "log", "once_cell", "percent-encoding", - "reqwest", - "roxmltree", + "quick-xml", "rust-ini", "serde", "serde_json", "sha2", "time 0.3.9", - "tokio", ] [[package]] @@ -2996,15 +3180,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "roxmltree" 
-version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" -dependencies = [ - "xmlparser", -] - [[package]] name = "rust-ini" version = "0.18.0" @@ -3215,11 +3390,22 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +[[package]] +name = "sluice" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7400c0eff44aa2fcb5e31a5f24ba9716ed90138769e4977a2ba6014ae63eb5" +dependencies = [ + "async-channel", + "futures-core", + "futures-io", +] + [[package]] name = "smallvec" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "snafu" @@ -3295,16 +3481,35 @@ name = "storage" version = "0.1.0" dependencies = [ "arc-swap", + "arrow-format", "async-trait", "atomic_float", + "bit-vec", + "bytes", "common-error", + "common-runtime", "common-telemetry", + "common-time", "criterion", "datatypes", + "futures", + "futures-util", + "lazy_static", + "log-store", + "object-store", + "planus", + "prost", "rand 0.8.5", + "regex", + "serde", + "serde_json", "snafu", "store-api", + "tempdir", "tokio", + "tonic", + "tonic-build", + "uuid", ] [[package]] @@ -3316,8 +3521,11 @@ dependencies = [ "bytes", "common-base", "common-error", + "common-time", "datatypes", "futures", + "object-store", + "serde", "snafu", "tokio", ] @@ -3422,10 +3630,12 @@ dependencies = [ "datafusion-common", "datatypes", "futures", + "log-store", "snafu", "storage", "store-api", "table", + "tempdir", "tokio", ] @@ -4004,9 +4214,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.0.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cfcd319456c4d6ea10087ed423473267e1a071f3bc0aa89f80d60997843c6f0" +checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" dependencies = [ "getrandom", ] @@ -4029,6 +4239,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + [[package]] name = "walkdir" version = "2.3.2" @@ -4144,6 +4360,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "wepoll-ffi" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" +dependencies = [ + "cc", +] + [[package]] name = "which" version = "4.2.5" @@ -4238,12 +4463,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "xmlparser" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "114ba2b24d2167ef6d67d7d04c8cc86522b87f490025f39f0303b7db5bf5e3d8" - [[package]] name = "zstd" version = "0.10.0+zstd.1.5.2" diff --git a/Cargo.toml b/Cargo.toml index eb662fbdb6ae..982d49ddd4aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,11 +5,11 @@ members = [ "src/common/base", "src/common/error", "src/common/function", + "src/common/query", + "src/common/recordbatch", 
"src/common/runtime", "src/common/telemetry", "src/common/time", - "src/common/query", - "src/common/recordbatch", "src/cmd", "src/datanode", "src/datatypes", diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs index 3f99164cd844..53a5dd678b0d 100644 --- a/src/cmd/src/datanode.rs +++ b/src/cmd/src/datanode.rs @@ -1,5 +1,5 @@ use clap::Parser; -use datanode::{Datanode, DatanodeOptions}; +use datanode::datanode::{Datanode, DatanodeOptions}; use snafu::ResultExt; use crate::error::{Result, StartDatanodeSnafu}; @@ -40,6 +40,7 @@ struct StartCommand { impl StartCommand { async fn run(self) -> Result<()> { Datanode::new(self.into()) + .await .context(StartDatanodeSnafu)? .start() .await @@ -52,6 +53,7 @@ impl From<StartCommand> for DatanodeOptions { DatanodeOptions { http_addr: cmd.http_addr, rpc_addr: cmd.rpc_addr, + ..Default::default() } } } diff --git a/src/common/base/Cargo.toml b/src/common/base/Cargo.toml index 9b5feeaa7986..c59f3356ea91 100644 --- a/src/common/base/Cargo.toml +++ b/src/common/base/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -bytes = "1.1" +bytes = { version = "1.1", features = ["serde"] } common-error = { path = "../error" } paste = "1.0" serde = { version = "1.0", features = ["derive"] } diff --git a/src/common/base/src/bytes.rs b/src/common/base/src/bytes.rs index e2f017dc488d..2fde5f5f9591 100644 --- a/src/common/base/src/bytes.rs +++ b/src/common/base/src/bytes.rs @@ -1,9 +1,9 @@ use std::ops::Deref; -use serde::{Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Bytes buffer. -#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)] pub struct Bytes(bytes::Bytes); impl From<bytes::Bytes> for Bytes { @@ -56,15 +56,6 @@ impl PartialEq<Bytes> for [u8] { } } -impl Serialize for Bytes { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> - where - S: Serializer, - { - self.0.serialize(serializer) - } -} - /// String buffer that can hold arbitrary encoding string (only support UTF-8 now). /// /// Now this buffer is restricted to only hold valid UTF-8 string (only allow constructing `StringBytes` @@ -128,6 +119,17 @@ impl Serialize for StringBytes { } } +// Custom Deserialize to ensure UTF-8 check is always done. 
+impl<'de> Deserialize<'de> for StringBytes { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Ok(StringBytes::from(s)) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs index b2b4422ab49e..6b38e6254606 100644 --- a/src/common/error/src/status_code.rs +++ b/src/common/error/src/status_code.rs @@ -34,6 +34,11 @@ pub enum StatusCode { TableNotFound, TableColumnNotFound, // ====== End of catalog related status code ======= + + // ====== Begin of storage related status code ===== + /// Storage is temporarily unable to handle the request + StorageUnavailable, + // ====== End of storage related status code ======= } impl fmt::Display for StatusCode { diff --git a/src/common/runtime/src/lib.rs b/src/common/runtime/src/lib.rs index e2e78a1d251b..7d2c8503f5a4 100644 --- a/src/common/runtime/src/lib.rs +++ b/src/common/runtime/src/lib.rs @@ -9,4 +9,4 @@ pub use global::{ spawn_read, spawn_write, write_runtime, }; -pub use crate::runtime::{Builder, JoinHandle, Runtime}; +pub use crate::runtime::{Builder, JoinError, JoinHandle, Runtime}; diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs index 3b2842433dbf..9c226bd2f997 100644 --- a/src/common/runtime/src/runtime.rs +++ b/src/common/runtime/src/runtime.rs @@ -6,13 +6,13 @@ use metrics::{decrement_gauge, increment_gauge}; use snafu::ResultExt; use tokio::runtime::{Builder as RuntimeBuilder, Handle}; use tokio::sync::oneshot; -pub use tokio::task::JoinHandle; +pub use tokio::task::{JoinError, JoinHandle}; use crate::error::*; use crate::metric::*; /// A runtime to run future tasks -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Runtime { handle: Handle, // Used to receive a drop signal when dropper is dropped, inspired by databend @@ -20,6 +20,7 @@ pub struct Runtime { } /// Dropping the dropper will cause runtime to shutdown. +#[derive(Debug)] pub struct Dropper { close: Option<oneshot::Sender<()>>, } diff --git a/src/common/time/src/range.rs b/src/common/time/src/range.rs index ed5222acd5b0..8dc51193e67d 100644 --- a/src/common/time/src/range.rs +++ b/src/common/time/src/range.rs @@ -11,7 +11,7 @@ pub struct TimeRange<T> { } impl<T> TimeRange<T> { - /// Create a new range that contains timestamp in `[start, end)`. + /// Creates a new range that contains timestamp in `[start, end)`. /// /// Returns `None` if `start` > `end`. pub fn new<U: PartialOrd + Into<T>>(start: U, end: U) -> Option<TimeRange<T>> { @@ -23,6 +23,14 @@ impl<T> TimeRange<T> { } } + /// Given a value, creates an empty time range that `start == end == value`. + pub fn empty_with_value<U: Clone + Into<T>>(value: U) -> TimeRange<T> { + TimeRange { + start: value.clone().into(), + end: value.into(), + } + } + /// Returns the lower bound of the range (inclusive). #[inline] pub fn start(&self) -> &T { @@ -71,6 +79,10 @@ mod tests { assert_eq!(range_eq.start(), range_eq.end()); assert_eq!(None, RangeMillis::new(1, 0)); + + let range = RangeMillis::empty_with_value(1024); + assert_eq!(range.start(), range.end()); + assert_eq!(1024, *range.start()); } #[test] diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs index 93cb079b0d87..4eddc58d418f 100644 --- a/src/common/time/src/timestamp.rs +++ b/src/common/time/src/timestamp.rs @@ -1,6 +1,8 @@ use std::cmp::Ordering; /// Unix timestamp in millisecond resolution. 
+/// +/// Negative timestamp is allowed, which represents timestamp before '1970-01-01T00:00:00'. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TimestampMillis(i64); @@ -18,6 +20,29 @@ impl TimestampMillis { pub const fn new(ms: i64) -> TimestampMillis { TimestampMillis(ms) } + + /// Returns the timestamp aligned by `bucket_duration` in milliseconds or + /// `None` if overflow occurred. + /// + /// # Panics + /// Panics if `bucket_duration <= 0`. + pub fn align_by_bucket(self, bucket_duration: i64) -> Option<TimestampMillis> { + assert!(bucket_duration > 0); + + let ts = if self.0 >= 0 { + self.0 + } else { + // `bucket_duration > 0` implies `bucket_duration - 1` won't overflow. + self.0.checked_sub(bucket_duration - 1)? + }; + + Some(TimestampMillis(ts / bucket_duration * bucket_duration)) + } + + /// Returns the timestamp value as i64. + pub fn as_i64(&self) -> i64 { + self.0 + } } impl From<i64> for TimestampMillis { @@ -60,6 +85,7 @@ mod tests { let timestamp = TimestampMillis::from(ts); assert_eq!(timestamp, ts); assert_eq!(ts, timestamp); + assert_eq!(ts, timestamp.as_i64()); assert_ne!(TimestampMillis::new(0), timestamp); assert!(TimestampMillis::new(-123) < TimestampMillis::new(0)); @@ -70,4 +96,28 @@ mod tests { assert_eq!(i64::MAX - 1, TimestampMillis::MAX); assert_eq!(i64::MIN, TimestampMillis::MIN); } + + #[test] + fn test_align_by_bucket() { + let bucket = 100; + assert_eq!(0, TimestampMillis::new(0).align_by_bucket(bucket).unwrap()); + assert_eq!(0, TimestampMillis::new(1).align_by_bucket(bucket).unwrap()); + assert_eq!(0, TimestampMillis::new(99).align_by_bucket(bucket).unwrap()); + assert_eq!( + 100, + TimestampMillis::new(100).align_by_bucket(bucket).unwrap() + ); + assert_eq!( + 100, + TimestampMillis::new(199).align_by_bucket(bucket).unwrap() + ); + + assert_eq!(0, TimestampMillis::MAX.align_by_bucket(i64::MAX).unwrap()); + assert_eq!( + i64::MAX, + TimestampMillis::INF.align_by_bucket(i64::MAX).unwrap() + ); + + assert_eq!(None, TimestampMillis::MIN.align_by_bucket(bucket)); + } } diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml index 8a9089c483e8..7d0023ac44f4 100644 --- a/src/datanode/Cargo.toml +++ b/src/datanode/Cargo.toml @@ -15,6 +15,7 @@ common-recordbatch = { path = "../common/recordbatch" } common-telemetry = { path = "../common/telemetry" } datatypes = { path = "../datatypes"} hyper = { version = "0.14", features = ["full"] } +log-store = { path = "../log-store" } metrics = "0.18" query = { path = "../query" } serde = "1.0" @@ -34,6 +35,7 @@ tower-http = { version ="0.3", features = ["full"]} [dev-dependencies] axum-test-helper = "0.1" common-query = { path = "../common/query" } +tempdir = "0.3" [dev-dependencies.arrow] package = "arrow2" diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index 77247636fa90..bc1c085ed718 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -8,11 +8,23 @@ use crate::error::{NewCatalogSnafu, Result}; use crate::instance::{Instance, InstanceRef}; use crate::server::Services; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct DatanodeOptions { pub http_addr: String, pub rpc_addr: String, + pub wal_dir: String, } + +impl Default for DatanodeOptions { + fn default() -> Self { + Self { + http_addr: Default::default(), + rpc_addr: Default::default(), + wal_dir: "/tmp/wal".to_string(), + } + } +} + /// Datanode service. 
pub struct Datanode { opts: DatanodeOptions, @@ -22,9 +34,9 @@ pub struct Datanode { } impl Datanode { - pub fn new(opts: DatanodeOptions) -> Result<Datanode> { + pub async fn new(opts: DatanodeOptions) -> Result<Datanode> { let catalog_list = memory::new_memory_catalog_list().context(NewCatalogSnafu)?; - let instance = Arc::new(Instance::new(catalog_list.clone())); + let instance = Arc::new(Instance::new(&opts, catalog_list.clone()).await?); Ok(Self { opts, diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs index f8e45608c424..254d545812b7 100644 --- a/src/datanode/src/error.rs +++ b/src/datanode/src/error.rs @@ -3,6 +3,7 @@ use std::any::Any; use common_error::ext::BoxedError; use common_error::prelude::*; use datatypes::prelude::ConcreteDataType; +use storage::error::Error as StorageError; use table::error::Error as TableError; use table_engine::error::Error as TableEngineError; @@ -92,6 +93,15 @@ pub enum Error { #[snafu(display("Fail to start gRPC server, source: {}", source))] StartGrpc { source: tonic::transport::Error }, + + #[snafu(display("Failed to create directory {}, source: {}", dir, source))] + CreateDir { dir: String, source: std::io::Error }, + + #[snafu(display("Failed to open log store, source: {}", source))] + OpenLogStore { source: log_store::error::Error }, + + #[snafu(display("Failed to storage engine, source: {}", source))] + OpenStorageEngine { source: StorageError }, } pub type Result<T> = std::result::Result<T, Error>; @@ -112,7 +122,10 @@ impl ErrorExt for Error { Error::StartHttp { .. } | Error::ParseAddr { .. } | Error::TcpBind { .. } - | Error::StartGrpc { .. } => StatusCode::Internal, + | Error::StartGrpc { .. } + | Error::CreateDir { .. } => StatusCode::Internal, + Error::OpenLogStore { source } => source.status_code(), + Error::OpenStorageEngine { source } => source.status_code(), } } diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs index a6a07dfd5497..b9dacd5622c5 100644 --- a/src/datanode/src/instance.rs +++ b/src/datanode/src/instance.rs @@ -1,21 +1,24 @@ -use std::sync::Arc; +use std::{fs, path, sync::Arc}; +use common_telemetry::logging::info; use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema}; +use log_store::fs::{config::LogConfig, log::LocalFileLogStore}; use query::catalog::{CatalogListRef, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use query::query_engine::{Output, QueryEngineFactory, QueryEngineRef}; use snafu::ResultExt; use sql::statements::statement::Statement; -use storage::EngineImpl; +use storage::{config::EngineConfig, EngineImpl}; use table::engine::EngineContext; use table::engine::TableEngine; use table::requests::CreateTableRequest; use table_engine::engine::MitoEngine; -use crate::error::{CreateTableSnafu, ExecuteSqlSnafu, Result}; +use crate::datanode::DatanodeOptions; +use crate::error::{self, CreateTableSnafu, ExecuteSqlSnafu, Result}; use crate::sql::SqlHandler; -type DefaultEngine = MitoEngine<EngineImpl>; +type DefaultEngine = MitoEngine<EngineImpl<LocalFileLogStore>>; // An abstraction to read/write services. 
pub struct Instance { @@ -30,17 +33,22 @@ pub struct Instance { pub type InstanceRef = Arc<Instance>; impl Instance { - pub fn new(catalog_list: CatalogListRef) -> Self { + pub async fn new(opts: &DatanodeOptions, catalog_list: CatalogListRef) -> Result<Self> { + let log_store = create_local_file_log_store(opts).await?; let factory = QueryEngineFactory::new(catalog_list.clone()); let query_engine = factory.query_engine().clone(); - let table_engine = DefaultEngine::new(EngineImpl::new()); + let table_engine = DefaultEngine::new( + EngineImpl::new(EngineConfig::default(), Arc::new(log_store)) + .await + .context(error::OpenStorageEngineSnafu)?, + ); - Self { + Ok(Self { query_engine, sql_handler: SqlHandler::new(table_engine.clone()), table_engine, catalog_list, - } + }) } pub async fn execute_sql(&self, sql: &str) -> Result<Output> { @@ -95,7 +103,10 @@ impl Instance { CreateTableRequest { name: table_name.to_string(), desc: Some(" a test table".to_string()), - schema: Arc::new(Schema::new(column_schemas)), + schema: Arc::new( + Schema::with_timestamp_index(column_schemas, 3) + .expect("ts is expected to be timestamp column"), + ), }, ) .await @@ -116,6 +127,25 @@ impl Instance { } } +async fn create_local_file_log_store(opts: &DatanodeOptions) -> Result<LocalFileLogStore> { + // create WAL directory + fs::create_dir_all(path::Path::new(&opts.wal_dir)) + .context(error::CreateDirSnafu { dir: &opts.wal_dir })?; + + info!("The WAL directory is: {}", &opts.wal_dir); + + let log_config = LogConfig { + log_file_dir: opts.wal_dir.clone(), + ..Default::default() + }; + + let log_store = LocalFileLogStore::open(&log_config) + .await + .context(error::OpenLogStoreSnafu)?; + + Ok(log_store) +} + #[cfg(test)] mod tests { use arrow::array::UInt64Array; @@ -123,12 +153,13 @@ mod tests { use query::catalog::memory; use super::*; + use crate::test_util; #[tokio::test] async fn test_execute_insert() { let catalog_list = memory::new_memory_catalog_list().unwrap(); - - let instance = Instance::new(catalog_list); + let (opts, _tmp_dir) = test_util::create_tmp_dir_and_datanode_opts(); + let instance = Instance::new(&opts, catalog_list).await.unwrap(); instance.start().await.unwrap(); let output = instance @@ -147,8 +178,8 @@ mod tests { #[tokio::test] async fn test_execute_query() { let catalog_list = memory::new_memory_catalog_list().unwrap(); - - let instance = Instance::new(catalog_list); + let (opts, _tmp_dir) = test_util::create_tmp_dir_and_datanode_opts(); + let instance = Instance::new(&opts, catalog_list).await.unwrap(); let output = instance .execute_sql("select sum(number) from numbers limit 20") diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs index 886847708535..a2b15f3cf494 100644 --- a/src/datanode/src/lib.rs +++ b/src/datanode/src/lib.rs @@ -6,5 +6,7 @@ mod metric; pub mod server; mod sql; -pub use crate::datanode::Datanode; -pub use crate::datanode::DatanodeOptions; +#[cfg(test)] +pub mod test_util; +#[cfg(test)] +mod tests; diff --git a/src/datanode/src/server/http/handler.rs b/src/datanode/src/server/http/handler.rs index 1e205237a861..2d09849d36a9 100644 --- a/src/datanode/src/server/http/handler.rs +++ b/src/datanode/src/server/http/handler.rs @@ -48,6 +48,7 @@ mod tests { use super::*; use crate::instance::Instance; use crate::server::http::JsonOutput; + use crate::test_util; fn create_params() -> Query<HashMap<String, String>> { let mut map = HashMap::new(); @@ -58,15 +59,16 @@ mod tests { Query(map) } - fn create_extension() -> Extension<InstanceRef> { + async fn 
create_extension() -> Extension<InstanceRef> { let catalog_list = memory::new_memory_catalog_list().unwrap(); - let instance = Arc::new(Instance::new(catalog_list)); + let (opts, _tmp_dir) = test_util::create_tmp_dir_and_datanode_opts(); + let instance = Arc::new(Instance::new(&opts, catalog_list).await.unwrap()); Extension(instance) } #[tokio::test] async fn test_sql_not_provided() { - let extension = create_extension(); + let extension = create_extension().await; let json = sql(extension, Query(HashMap::default())).await; match json { @@ -82,7 +84,7 @@ mod tests { #[tokio::test] async fn test_sql_output_rows() { let query = create_params(); - let extension = create_extension(); + let extension = create_extension().await; let json = sql(extension, query).await; @@ -110,7 +112,7 @@ mod tests { counter!("test_metrics", 1); let query = create_params(); - let extension = create_extension(); + let extension = create_extension().await; let text = metrics(extension, query).await; match text { diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs index a9b93fe696c2..98bce48f4eb3 100644 --- a/src/datanode/src/sql.rs +++ b/src/datanode/src/sql.rs @@ -63,14 +63,17 @@ mod tests { use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::value::Value; + use log_store::fs::noop::NoopLogStore; use query::catalog::memory; use query::catalog::schema::SchemaProvider; use query::error::Result as QueryResult; use query::QueryEngineFactory; + use storage::config::EngineConfig; use storage::EngineImpl; use table::error::Result as TableResult; use table::{Table, TableRef}; use table_engine::engine::MitoEngine; + use tempdir::TempDir; use super::*; @@ -90,7 +93,7 @@ mod tests { ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), true), ]; - Arc::new(Schema::new(column_schemas)) + Arc::new(Schema::with_timestamp_index(column_schemas, 3).unwrap()) } async fn scan( &self, @@ -129,8 +132,11 @@ mod tests { } } - #[test] - fn test_statement_to_request() { + #[tokio::test] + async fn test_statement_to_request() { + let dir = TempDir::new("setup_test_engine_and_table").unwrap(); + let store_dir = dir.path().to_string_lossy(); + let catalog_list = memory::new_memory_catalog_list().unwrap(); let factory = QueryEngineFactory::new(catalog_list); let query_engine = factory.query_engine().clone(); @@ -140,7 +146,14 @@ mod tests { ('host2', 88.8, 333.3, 1655276558000) "#; - let table_engine = MitoEngine::<EngineImpl>::new(EngineImpl::new()); + let table_engine = MitoEngine::<EngineImpl<NoopLogStore>>::new( + EngineImpl::new( + EngineConfig::with_store_dir(&store_dir), + Arc::new(NoopLogStore::default()), + ) + .await + .unwrap(), + ); let sql_handler = SqlHandler::new(table_engine); let stmt = query_engine.sql_to_statement(sql).unwrap(); diff --git a/src/datanode/src/test_util.rs b/src/datanode/src/test_util.rs new file mode 100644 index 000000000000..6aee7d3bfae0 --- /dev/null +++ b/src/datanode/src/test_util.rs @@ -0,0 +1,17 @@ +use tempdir::TempDir; + +use crate::datanode::DatanodeOptions; + +/// Create a tmp dir(will be deleted once it goes out of scope.) and a default `DatanodeOptions`, +/// Only for test. 
+/// +/// TODO: Add a test feature +pub fn create_tmp_dir_and_datanode_opts() -> (DatanodeOptions, TempDir) { + let tmp_dir = TempDir::new("/tmp/greptimedb_test").unwrap(); + let opts = DatanodeOptions { + wal_dir: tmp_dir.path().to_str().unwrap().to_string(), + ..Default::default() + }; + + (opts, tmp_dir) +} diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs new file mode 100644 index 000000000000..150709f786c7 --- /dev/null +++ b/src/datanode/src/tests.rs @@ -0,0 +1 @@ +mod http_test; diff --git a/src/datanode/tests/http_test.rs b/src/datanode/src/tests/http_test.rs similarity index 82% rename from src/datanode/tests/http_test.rs rename to src/datanode/src/tests/http_test.rs index c767c7bb4a01..b24f4dbaf741 100644 --- a/src/datanode/tests/http_test.rs +++ b/src/datanode/src/tests/http_test.rs @@ -5,12 +5,16 @@ use std::sync::Arc; use axum::http::StatusCode; use axum::Router; use axum_test_helper::TestClient; -use datanode::{instance::Instance, server::http::HttpServer}; use query::catalog::memory; -fn make_test_app() -> Router { +use crate::instance::Instance; +use crate::server::http::HttpServer; +use crate::test_util; + +async fn make_test_app() -> Router { let catalog_list = memory::new_memory_catalog_list().unwrap(); - let instance = Arc::new(Instance::new(catalog_list)); + let (opts, _tmp_dir) = test_util::create_tmp_dir_and_datanode_opts(); + let instance = Arc::new(Instance::new(&opts, catalog_list).await.unwrap()); let http_server = HttpServer::new(instance); http_server.make_app() } @@ -18,7 +22,7 @@ fn make_test_app() -> Router { #[tokio::test] async fn test_sql_api() { common_telemetry::init_default_ut_logging(); - let app = make_test_app(); + let app = make_test_app().await; let client = TestClient::new(app); let res = client.get("/sql").send().await; assert_eq!(res.status(), StatusCode::OK); @@ -46,7 +50,7 @@ async fn test_sql_api() { async fn test_metrics_api() { common_telemetry::init_default_ut_logging(); common_telemetry::init_default_metrics_recorder(); - let app = make_test_app(); + let app = make_test_app().await; let client = TestClient::new(app); // Send a sql diff --git a/src/datatypes/Cargo.toml b/src/datatypes/Cargo.toml index b08d59c81550..2f07ea8166ff 100644 --- a/src/datatypes/Cargo.toml +++ b/src/datatypes/Cargo.toml @@ -13,10 +13,10 @@ common-base = { path = "../common/base" } common-error = { path = "../common/error" } datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2" } enum_dispatch = "0.3" -ordered-float = "3.0" -paste = "1.0" num = "0.4" num-traits = "0.2" -serde = { version = "1.0.136", features = ["derive"] } +ordered-float = { version = "3.0", features = ["serde"]} +paste = "1.0" +serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" snafu = { version = "0.7", features = ["backtraces"] } diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs index 249875e472d5..b472155d191c 100644 --- a/src/datatypes/src/data_type.rs +++ b/src/datatypes/src/data_type.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use arrow::datatypes::DataType as ArrowDataType; use paste::paste; +use serde::{Deserialize, Serialize}; use crate::error::{self, Error, Result}; use crate::type_id::LogicalTypeId; @@ -11,7 +12,7 @@ use crate::types::{ }; use crate::value::Value; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[enum_dispatch::enum_dispatch(DataType)] pub enum ConcreteDataType { Null(NullType), @@ -72,6 +73,10 @@ impl ConcreteDataType { 
) } + pub fn is_timestamp(&self) -> bool { + matches!(self, ConcreteDataType::Int64(_)) + } + pub fn numerics() -> Vec<ConcreteDataType> { vec![ ConcreteDataType::int8_datatype(), diff --git a/src/datatypes/src/error.rs b/src/datatypes/src/error.rs index a08a4119d1e5..be367f5bd1d2 100644 --- a/src/datatypes/src/error.rs +++ b/src/datatypes/src/error.rs @@ -30,6 +30,20 @@ pub enum Error { arrow_type: arrow::datatypes::DataType, backtrace: Backtrace, }, + + #[snafu(display( + "Failed to parse index in schema meta, value: {}, source: {}", + value, + source + ))] + ParseSchemaIndex { + value: String, + source: std::num::ParseIntError, + backtrace: Backtrace, + }, + + #[snafu(display("Invalid timestamp index: {}", index))] + InvalidTimestampIndex { index: usize, backtrace: Backtrace }, } impl ErrorExt for Error { diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs index 9f11b340e1db..0b0ec7c211cc 100644 --- a/src/datatypes/src/schema.rs +++ b/src/datatypes/src/schema.rs @@ -1,15 +1,19 @@ -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; -use arrow::datatypes::{Field, Schema as ArrowSchema}; +use arrow::datatypes::{Field, Metadata, Schema as ArrowSchema}; +use serde::{Deserialize, Serialize}; +use snafu::{ensure, ResultExt}; use crate::data_type::{ConcreteDataType, DataType}; -use crate::error::{Error, Result}; +use crate::error::{self, Error, Result}; + +const TIMESTAMP_INDEX_KEY: &str = "greptime:timestamp_index"; // TODO(yingwen): consider assign a version to schema so compare schema can be // done by compare version. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ColumnSchema { pub name: String, pub data_type: ConcreteDataType, @@ -30,31 +34,49 @@ impl ColumnSchema { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Schema { column_schemas: Vec<ColumnSchema>, name_to_index: HashMap<String, usize>, arrow_schema: Arc<ArrowSchema>, + /// Index of the timestamp key column. + /// + /// Timestamp key column is the column holds the timestamp and forms part of + /// the primary key. None means there is no timestamp key column. 
+ timestamp_index: Option<usize>, } impl Schema { pub fn new(column_schemas: Vec<ColumnSchema>) -> Schema { - let mut fields = Vec::with_capacity(column_schemas.len()); - let mut name_to_index = HashMap::with_capacity(column_schemas.len()); - for (index, column_schema) in column_schemas.iter().enumerate() { - let field = Field::from(column_schema); - fields.push(field); - name_to_index.insert(column_schema.name.clone(), index); - } - let arrow_schema = Arc::new(ArrowSchema::from(fields)); + let (arrow_schema, name_to_index) = collect_column_schemas(&column_schemas); Schema { column_schemas, name_to_index, - arrow_schema, + arrow_schema: Arc::new(arrow_schema), + timestamp_index: None, } } + pub fn with_timestamp_index( + column_schemas: Vec<ColumnSchema>, + timestamp_index: usize, + ) -> Result<Schema> { + let (arrow_schema, name_to_index) = collect_column_schemas(&column_schemas); + let mut metadata = BTreeMap::new(); + metadata.insert(TIMESTAMP_INDEX_KEY.to_string(), timestamp_index.to_string()); + let arrow_schema = Arc::new(arrow_schema.with_metadata(metadata)); + + validate_timestamp_index(&column_schemas, timestamp_index)?; + + Ok(Schema { + column_schemas, + name_to_index, + arrow_schema, + timestamp_index: Some(timestamp_index), + }) + } + pub fn arrow_schema(&self) -> &Arc<ArrowSchema> { &self.arrow_schema } @@ -68,6 +90,55 @@ impl Schema { .get(name) .map(|index| &self.column_schemas[*index]) } + + #[inline] + pub fn num_columns(&self) -> usize { + self.column_schemas.len() + } + + /// Returns index of the timestamp key column. + #[inline] + pub fn timestamp_index(&self) -> Option<usize> { + self.timestamp_index + } + + #[inline] + pub fn timestamp_column(&self) -> Option<&ColumnSchema> { + self.timestamp_index.map(|idx| &self.column_schemas[idx]) + } +} + +fn collect_column_schemas( + column_schemas: &[ColumnSchema], +) -> (ArrowSchema, HashMap<String, usize>) { + let mut fields = Vec::with_capacity(column_schemas.len()); + let mut name_to_index = HashMap::with_capacity(column_schemas.len()); + for (index, column_schema) in column_schemas.iter().enumerate() { + let field = Field::from(column_schema); + fields.push(field); + name_to_index.insert(column_schema.name.clone(), index); + } + + (ArrowSchema::from(fields), name_to_index) +} + +fn validate_timestamp_index(column_schemas: &[ColumnSchema], timestamp_index: usize) -> Result<()> { + ensure!( + timestamp_index < column_schemas.len(), + error::InvalidTimestampIndexSnafu { + index: timestamp_index, + } + ); + + let column_schema = &column_schemas[timestamp_index]; + ensure!( + column_schema.data_type.is_timestamp(), + error::InvalidTimestampIndexSnafu { + index: timestamp_index, + } + ); + + Ok(()) } pub type SchemaRef = Arc<Schema>; @@ -108,14 +179,32 @@ impl TryFrom<Arc<ArrowSchema>> for Schema { column_schemas.push(column_schema); } + let timestamp_index = try_parse_index(&arrow_schema.metadata, TIMESTAMP_INDEX_KEY)?; + if let Some(index) = timestamp_index { + validate_timestamp_index(&column_schemas, index)?; + } + Ok(Self { column_schemas, name_to_index, arrow_schema, + timestamp_index, }) } } +fn try_parse_index(metadata: &Metadata, key: &str) -> Result<Option<usize>> { + if let Some(value) = metadata.get(key) { + let index = value + .parse() + .context(error::ParseSchemaIndexSnafu { value })?; + + Ok(Some(index)) + } else { + Ok(None) + } +} + #[cfg(test)] mod tests { use arrow::datatypes::DataType as ArrowDataType; @@ -135,13 +224,17 @@ mod tests { } #[test] - fn test_schema() { + fn test_schema_no_timestamp() { let 
column_schemas = vec![ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), false), ColumnSchema::new("col2", ConcreteDataType::float64_datatype(), true), ]; let schema = Schema::new(column_schemas.clone()); + assert_eq!(2, schema.num_columns()); + assert!(schema.timestamp_index().is_none()); + assert!(schema.timestamp_column().is_none()); + for column_schema in &column_schemas { let found = schema.column_schema_by_name(&column_schema.name).unwrap(); assert_eq!(column_schema, found); @@ -158,4 +251,31 @@ mod tests { assert_eq!(arrow_schema, *schema.arrow_schema()); assert_eq!(arrow_schema, *new_schema.arrow_schema()); } + + #[test] + fn test_schema_with_timestamp() { + let column_schemas = vec![ + ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true), + ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false), + ]; + let schema = Schema::with_timestamp_index(column_schemas.clone(), 1).unwrap(); + + assert_eq!(1, schema.timestamp_index().unwrap()); + assert_eq!(&column_schemas[1], schema.timestamp_column().unwrap()); + + let new_schema = Schema::try_from(schema.arrow_schema().clone()).unwrap(); + assert_eq!(1, schema.timestamp_index().unwrap()); + assert_eq!(schema, new_schema); + } + + #[test] + fn test_schema_wrong_timestamp() { + let column_schemas = vec![ + ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true), + ColumnSchema::new("col2", ConcreteDataType::float64_datatype(), false), + ]; + assert!(Schema::with_timestamp_index(column_schemas.clone(), 0).is_err()); + assert!(Schema::with_timestamp_index(column_schemas.clone(), 1).is_err()); + assert!(Schema::with_timestamp_index(column_schemas, 2).is_err()); + } } diff --git a/src/datatypes/src/types/binary_type.rs b/src/datatypes/src/types/binary_type.rs index 56c81002e719..adbe69aef181 100644 --- a/src/datatypes/src/types/binary_type.rs +++ b/src/datatypes/src/types/binary_type.rs @@ -2,12 +2,13 @@ use std::sync::Arc; use arrow::datatypes::DataType as ArrowDataType; use common_base::bytes::StringBytes; +use serde::{Deserialize, Serialize}; use crate::data_type::{DataType, DataTypeRef}; use crate::type_id::LogicalTypeId; use crate::value::Value; -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct BinaryType; impl BinaryType { diff --git a/src/datatypes/src/types/boolean_type.rs b/src/datatypes/src/types/boolean_type.rs index c62473ef5613..2394410299d1 100644 --- a/src/datatypes/src/types/boolean_type.rs +++ b/src/datatypes/src/types/boolean_type.rs @@ -1,12 +1,13 @@ use std::sync::Arc; use arrow::datatypes::DataType as ArrowDataType; +use serde::{Deserialize, Serialize}; use crate::data_type::{DataType, DataTypeRef}; use crate::type_id::LogicalTypeId; use crate::value::Value; -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct BooleanType; impl BooleanType { diff --git a/src/datatypes/src/types/list_type.rs b/src/datatypes/src/types/list_type.rs index a93352a814b3..e3afa6aafa3b 100644 --- a/src/datatypes/src/types/list_type.rs +++ b/src/datatypes/src/types/list_type.rs @@ -1,10 +1,11 @@ use arrow::datatypes::{DataType as ArrowDataType, Field}; +use serde::{Deserialize, Serialize}; use crate::prelude::*; use crate::value::ListValue; /// Used to represent the List datatype. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ListType { /// The type of List's inner data. 
inner: Box<ConcreteDataType>, diff --git a/src/datatypes/src/types/null_type.rs b/src/datatypes/src/types/null_type.rs index 7d932a8f7935..27133c9755d8 100644 --- a/src/datatypes/src/types/null_type.rs +++ b/src/datatypes/src/types/null_type.rs @@ -1,12 +1,13 @@ use std::sync::Arc; use arrow::datatypes::DataType as ArrowDataType; +use serde::{Deserialize, Serialize}; use crate::data_type::{DataType, DataTypeRef}; use crate::type_id::LogicalTypeId; use crate::value::Value; -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct NullType; impl NullType { diff --git a/src/datatypes/src/types/primitive_type.rs b/src/datatypes/src/types/primitive_type.rs index 6c1e5b5bfe94..61785cad51dc 100644 --- a/src/datatypes/src/types/primitive_type.rs +++ b/src/datatypes/src/types/primitive_type.rs @@ -2,14 +2,16 @@ use std::marker::PhantomData; use arrow::datatypes::DataType as ArrowDataType; use paste::paste; +use serde::{Deserialize, Serialize}; use crate::data_type::{ConcreteDataType, DataType}; use crate::type_id::LogicalTypeId; use crate::types::primitive_traits::Primitive; use crate::value::Value; -#[derive(Clone, PartialEq)] +#[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct PrimitiveType<T: Primitive> { + #[serde(skip)] _phantom: PhantomData<T>, } diff --git a/src/datatypes/src/types/string_type.rs b/src/datatypes/src/types/string_type.rs index 6717b27e748a..20d6879430dc 100644 --- a/src/datatypes/src/types/string_type.rs +++ b/src/datatypes/src/types/string_type.rs @@ -2,11 +2,12 @@ use std::sync::Arc; use arrow::datatypes::DataType as ArrowDataType; use common_base::bytes::StringBytes; +use serde::{Deserialize, Serialize}; use crate::data_type::DataType; use crate::prelude::{DataTypeRef, LogicalTypeId, Value}; -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct StringType; impl StringType { diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs index 3066f7460a74..319f98ed2438 100644 --- a/src/datatypes/src/value.rs +++ b/src/datatypes/src/value.rs @@ -3,7 +3,7 @@ use std::cmp::Ordering; use common_base::bytes::{Bytes, StringBytes}; use datafusion_common::ScalarValue; pub use ordered_float::OrderedFloat; -use serde::{Serialize, Serializer}; +use serde::{Deserialize, Serialize, Serializer}; use crate::prelude::*; @@ -15,7 +15,7 @@ pub type OrderedF64 = OrderedFloat<f64>; /// Although compare Value with different data type is allowed, it is recommended to only /// compare Value with same data type. Comparing Value with different data type may not /// behaves as what you expect. 
-#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize)] pub enum Value { Null, @@ -187,7 +187,7 @@ impl From<Value> for ScalarValue { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ListValue { /// List of nested Values (boxed to reduce size_of(Value)) #[allow(clippy::box_collection)] diff --git a/src/log-store/src/fs.rs b/src/log-store/src/fs.rs index a852306754fc..3ce5cdc01f7c 100644 --- a/src/log-store/src/fs.rs +++ b/src/log-store/src/fs.rs @@ -1,14 +1,15 @@ use store_api::logstore::entry::{Id, Offset}; use store_api::logstore::AppendResponse; -mod config; +pub mod config; mod crc; mod entry; mod file; mod file_name; mod index; -mod log; +pub mod log; mod namespace; +pub mod noop; #[derive(Debug, PartialEq, Eq)] pub struct AppendResponseImpl { diff --git a/src/log-store/src/fs/file.rs b/src/log-store/src/fs/file.rs index 77f3837e4329..e718e26447a9 100644 --- a/src/log-store/src/fs/file.rs +++ b/src/log-store/src/fs/file.rs @@ -463,81 +463,82 @@ impl AppendRequest { } } -#[cfg(test)] -mod tests { - use std::io::Read; - - use common_telemetry::logging; - use futures_util::StreamExt; - use tempdir::TempDir; - - use super::*; - use crate::fs::namespace::LocalNamespace; - - #[tokio::test] - pub async fn test_create_entry_stream() { - logging::init_default_ut_logging(); - let config = LogConfig::default(); - - let dir = TempDir::new("greptimedb-store-test").unwrap(); - let path_buf = dir.path().join("0010.log"); - let path = path_buf.to_str().unwrap().to_string(); - File::create(path.as_str()).await.unwrap(); - - let mut file = LogFile::open(path.clone(), &config) - .await - .unwrap_or_else(|_| panic!("Failed to open file: {}", path)); - file.start().await.expect("Failed to start log file"); - - assert_eq!( - 10, - file.append(&mut EntryImpl::new("test1".as_bytes())) - .await - .expect("Failed to append entry 1") - .entry_id - ); - - assert_eq!( - 11, - file.append(&mut EntryImpl::new("test-2".as_bytes())) - .await - .expect("Failed to append entry 2") - .entry_id - ); - - let mut log_file = std::fs::File::open(path.clone()).expect("Test log file does not exist"); - let metadata = log_file.metadata().expect("Failed to read file metadata"); - info!("Log file metadata: {:?}", metadata); - - assert_eq!(59, metadata.len()); // 24+5+24+6 - let mut content = vec![0; metadata.len() as usize]; - log_file - .read_exact(&mut content) - .expect("Read log file failed"); - - info!( - "Log file {:?} content: {}, size:{}", - dir, - hex::encode(content), - metadata.len() - ); - - let mut stream = file.create_stream(LocalNamespace::default(), 0); - - let mut data = vec![]; - - while let Some(v) = stream.next().await { - let entries = v.unwrap(); - let content = entries[0].data(); - let vec = content.to_vec(); - info!("Read entry: {}", String::from_utf8_lossy(&vec)); - data.push(String::from_utf8(vec).unwrap()); - } - - assert_eq!(vec!["test1".to_string(), "test-2".to_string()], data); - drop(stream); - - let result = file.stop().await; - info!("Stop file res: {:?}", result); - } -} +// TODO(hl): uncomment this test once log file read visibility issue fixed. 
+// #[cfg(test)] +// mod tests { +// use std::io::Read; +// +// use common_telemetry::logging; +// use futures_util::StreamExt; +// use tempdir::TempDir; +// +// use super::*; +// use crate::fs::namespace::LocalNamespace; +// +// #[tokio::test] +// pub async fn test_create_entry_stream() { +// logging::init_default_ut_logging(); +// let config = LogConfig::default(); +// +// let dir = TempDir::new("greptimedb-store-test").unwrap(); +// let path_buf = dir.path().join("0010.log"); +// let path = path_buf.to_str().unwrap().to_string(); +// File::create(path.as_str()).await.unwrap(); +// +// let mut file = LogFile::open(path.clone(), &config) +// .await +// .unwrap_or_else(|_| panic!("Failed to open file: {}", path)); +// file.start().await.expect("Failed to start log file"); +// +// assert_eq!( +// 10, +// file.append(&mut EntryImpl::new("test1".as_bytes())) +// .await +// .expect("Failed to append entry 1") +// .entry_id +// ); +// +// assert_eq!( +// 11, +// file.append(&mut EntryImpl::new("test-2".as_bytes())) +// .await +// .expect("Failed to append entry 2") +// .entry_id +// ); +// +// let mut log_file = std::fs::File::open(path.clone()).expect("Test log file does not exist"); +// let metadata = log_file.metadata().expect("Failed to read file metadata"); +// info!("Log file metadata: {:?}", metadata); +// +// assert_eq!(59, metadata.len()); // 24+5+24+6 +// let mut content = vec![0; metadata.len() as usize]; +// log_file +// .read_exact(&mut content) +// .expect("Read log file failed"); +// +// info!( +// "Log file {:?} content: {}, size:{}", +// dir, +// hex::encode(content), +// metadata.len() +// ); +// +// let mut stream = file.create_stream(LocalNamespace::default(), 0); +// +// let mut data = vec![]; +// +// while let Some(v) = stream.next().await { +// let entries = v.unwrap(); +// let content = entries[0].data(); +// let vec = content.to_vec(); +// info!("Read entry: {}", String::from_utf8_lossy(&vec)); +// data.push(String::from_utf8(vec).unwrap()); +// } +// +// assert_eq!(vec!["test1".to_string(), "test-2".to_string()], data); +// drop(stream); +// +// let result = file.stop().await; +// info!("Stop file res: {:?}", result); +// } +// } diff --git a/src/log-store/src/fs/log.rs b/src/log-store/src/fs/log.rs index b4055ec34b68..c110237e3a93 100644 --- a/src/log-store/src/fs/log.rs +++ b/src/log-store/src/fs/log.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use arc_swap::ArcSwap; use common_telemetry::{error, info, warn}; use snafu::{OptionExt, ResultExt}; -use store_api::logstore::entry::Id; +use store_api::logstore::entry::{Encode, Id}; use store_api::logstore::LogStore; use tokio::sync::RwLock; @@ -167,17 +167,20 @@ impl LogStore for LocalFileLogStore { async fn append( &self, _ns: Self::Namespace, - mut e: Self::Entry, + mut entry: Self::Entry, ) -> Result<Self::AppendResponse> { // TODO(hl): configurable retry times for _ in 0..3 { let current_active_file = self.active_file(); - match current_active_file.append(&mut e).await { + match current_active_file.append(&mut entry).await { Ok(r) => return Ok(r), Err(e) => match e { Error::Eof => { self.roll_next(current_active_file.clone()).await?; - info!("Rolled to next file, retry append"); + info!( + "Rolled to next file, retry append, entry size: {}", + entry.encoded_size() + ); continue; } Error::Internal { .. 
} => { diff --git a/src/log-store/src/fs/namespace.rs b/src/log-store/src/fs/namespace.rs index d5bbd7ef28ba..c39f87c967ef 100644 --- a/src/log-store/src/fs/namespace.rs +++ b/src/log-store/src/fs/namespace.rs @@ -19,6 +19,14 @@ struct LocalNamespaceInner { } impl Namespace for LocalNamespace { + fn new(name: &str, id: u64) -> Self { + let inner = Arc::new(LocalNamespaceInner { + name: name.to_string(), + id, + }); + Self { inner } + } + fn name(&self) -> &str { self.inner.name.as_str() } @@ -29,12 +37,4 @@ impl LocalNamespace { fn id(&self) -> u64 { self.inner.id } - - pub fn new(name: &str, id: u64) -> Self { - let inner = Arc::new(LocalNamespaceInner { - name: name.to_string(), - id, - }); - Self { inner } - } } diff --git a/src/log-store/src/fs/noop.rs b/src/log-store/src/fs/noop.rs new file mode 100644 index 000000000000..8eba4eb1560d --- /dev/null +++ b/src/log-store/src/fs/noop.rs @@ -0,0 +1,53 @@ +use store_api::logstore::{entry::Id, LogStore}; + +use crate::error::{Error, Result}; +use crate::fs::{entry::EntryImpl, namespace::LocalNamespace, AppendResponseImpl}; + +/// A noop log store which only for test +// TODO: Add a test feature +#[derive(Default)] +pub struct NoopLogStore {} + +#[async_trait::async_trait] +impl LogStore for NoopLogStore { + type Error = Error; + type Namespace = LocalNamespace; + type Entry = EntryImpl; + type AppendResponse = AppendResponseImpl; + + async fn append( + &self, + _ns: Self::Namespace, + mut _e: Self::Entry, + ) -> Result<Self::AppendResponse> { + Ok(AppendResponseImpl { + entry_id: 0, + offset: 0, + }) + } + + async fn append_batch(&self, _ns: Self::Namespace, _e: Vec<Self::Entry>) -> Result<Id> { + todo!() + } + + async fn read( + &self, + _ns: Self::Namespace, + _id: Id, + ) -> Result<store_api::logstore::entry_stream::SendableEntryStream<'_, Self::Entry, Self::Error>> + { + todo!() + } + + async fn create_namespace(&mut self, _ns: Self::Namespace) -> Result<()> { + todo!() + } + + async fn delete_namespace(&mut self, _ns: Self::Namespace) -> Result<()> { + todo!() + } + + async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> { + todo!() + } +} diff --git a/src/log-store/src/lib.rs b/src/log-store/src/lib.rs index 572d575ff2e5..9e5afc2ab9fe 100644 --- a/src/log-store/src/lib.rs +++ b/src/log-store/src/lib.rs @@ -1,2 +1,4 @@ -mod error; +pub mod error; pub mod fs; + +pub mod test_util; diff --git a/src/log-store/src/test_util.rs b/src/log-store/src/test_util.rs new file mode 100644 index 000000000000..e49007c80291 --- /dev/null +++ b/src/log-store/src/test_util.rs @@ -0,0 +1 @@ +pub mod log_store_util; diff --git a/src/log-store/src/test_util/log_store_util.rs b/src/log-store/src/test_util/log_store_util.rs new file mode 100644 index 000000000000..b8c4f5fb03c5 --- /dev/null +++ b/src/log-store/src/test_util/log_store_util.rs @@ -0,0 +1,16 @@ +use tempdir::TempDir; + +use crate::fs::{config::LogConfig, log::LocalFileLogStore}; + +/// Create a tmp directory for write log, used for test. 
+// TODO: Add a test feature +pub async fn create_tmp_local_file_log_store(dir: &str) -> (LocalFileLogStore, TempDir) { + let dir = TempDir::new(dir).unwrap(); + let cfg = LogConfig { + append_buffer_size: 128, + max_log_file_size: 128, + log_file_dir: dir.path().to_str().unwrap().to_string(), + }; + + (LocalFileLogStore::open(&cfg).await.unwrap(), dir) +} diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml index 5c651b9c3b34..fb18330456f3 100644 --- a/src/object-store/Cargo.toml +++ b/src/object-store/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] futures = { version = "0.3"} -opendal = "0.6" +opendal = "0.9" tokio = { version = "1.0", features = ["full"] } [dev-dependencies] diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs index 5043c2f561f6..c9e8aff58a67 100644 --- a/src/object-store/src/lib.rs +++ b/src/object-store/src/lib.rs @@ -1,5 +1,6 @@ pub use opendal::{ - Accessor, Layer, Metadata, Object, ObjectMode, ObjectStreamer, Operator as ObjectStore, + Accessor, DirEntry, DirStreamer, Layer, Metadata, Object, ObjectMetadata, ObjectMode, + Operator as ObjectStore, }; pub mod backend; pub mod util; diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs index 93b8c0f80144..32231dca6a44 100644 --- a/src/object-store/src/util.rs +++ b/src/object-store/src/util.rs @@ -1,7 +1,29 @@ use futures::TryStreamExt; -use crate::{Object, ObjectStreamer}; +use crate::{DirEntry, DirStreamer}; -pub async fn collect(stream: ObjectStreamer) -> Result<Vec<Object>, std::io::Error> { +pub async fn collect(stream: DirStreamer) -> Result<Vec<DirEntry>, std::io::Error> { stream.try_collect::<Vec<_>>().await } + +/// Normalize a directory path, ensure it is ends with '/' +pub fn normalize_dir(dir: &str) -> String { + let mut dir = dir.to_string(); + if !dir.ends_with('/') { + dir.push('/') + } + + dir +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_normalize_dir() { + assert_eq!("/", normalize_dir("/")); + assert_eq!("/", normalize_dir("")); + assert_eq!("/test/", normalize_dir("/test")); + } +} diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs index c51eb8219b6f..87936a1a7135 100644 --- a/src/object-store/tests/object_store_test.rs +++ b/src/object-store/tests/object_store_test.rs @@ -4,7 +4,7 @@ use anyhow::Result; use common_telemetry::logging; use object_store::{ backend::{fs, s3}, - util, Object, ObjectMode, ObjectStore, ObjectStreamer, + util, DirStreamer, Object, ObjectMode, ObjectStore, }; use tempdir::TempDir; @@ -25,8 +25,7 @@ async fn test_object_crud(store: &ObjectStore) -> Result<()> { // Get object's Metadata let meta = object.metadata().await?; - assert!(meta.complete()); - assert_eq!("test_file", meta.path()); + assert_eq!("test_file", object.path()); assert_eq!(ObjectMode::FILE, meta.mode()); assert_eq!(13, meta.content_length()); @@ -50,7 +49,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> { // List objects let o: Object = store.object("/"); - let obs: ObjectStreamer = o.list().await?; + let obs: DirStreamer = o.list().await?; let objects = util::collect(obs).await?; assert_eq!(3, objects.len()); @@ -63,7 +62,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> { assert_eq!(1, objects.len()); // Only o2 is exists - let o2 = &objects[0]; + let o2 = &objects[0].clone().into_object(); let bs = o2.read().await?; assert_eq!("Hello, object2!", String::from_utf8(bs)?); // Delete o2 diff --git a/src/storage/Cargo.toml 
b/src/storage/Cargo.toml index 62389bb29347..479b7f4261ff 100644 --- a/src/storage/Cargo.toml +++ b/src/storage/Cargo.toml @@ -7,18 +7,39 @@ edition = "2021" [dependencies] arc-swap = "1.0" +arrow-format = { version = "0.4", features = ["ipc"] } async-trait = "0.1" +bit-vec = "0.6" +bytes = "1.1" common-error = { path = "../common/error" } +common-runtime = { path = "../common/runtime" } common-telemetry = { path = "../common/telemetry" } +common-time = { path = "../common/time" } datatypes = { path = "../datatypes" } +futures = "0.3" +futures-util = "0.3" +lazy_static = "1.4" +log-store = { path = "../log-store" } +object-store = { path = "../object-store" } +planus = "0.2" +prost = "0.10" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" snafu = { version = "0.7", features = ["backtraces"] } store-api = { path = "../store-api" } +regex = "1.5" tokio = { version = "1.18", features = ["full"] } +tonic = "0.7" +uuid = { version = "1.1" , features=["v4"]} [dev-dependencies] +atomic_float="0.1" criterion = "0.3" rand = "0.8" -atomic_float="0.1" +tempdir = "0.3" + +[build-dependencies] +tonic-build = "0.7" [[bench]] name = "bench_main" diff --git a/src/storage/benches/memtable/util/bench_context.rs b/src/storage/benches/memtable/util/bench_context.rs index 0cbdc73557f1..2aeb55320a97 100644 --- a/src/storage/benches/memtable/util/bench_context.rs +++ b/src/storage/benches/memtable/util/bench_context.rs @@ -27,9 +27,11 @@ impl BenchContext { let iter_ctx = IterContext { batch_size, visible_sequence: SequenceNumber::MAX, + for_flush: false, }; - let mut iter = self.memtable.iter(iter_ctx).unwrap(); - while let Ok(Some(_)) = iter.next() { + let iter = self.memtable.iter(iter_ctx).unwrap(); + for batch in iter { + batch.unwrap(); read_count += batch_size; } read_count diff --git a/src/storage/benches/memtable/util/mod.rs b/src/storage/benches/memtable/util/mod.rs index 7cc76ca629ca..a5cdba93f144 100644 --- a/src/storage/benches/memtable/util/mod.rs +++ b/src/storage/benches/memtable/util/mod.rs @@ -22,5 +22,5 @@ pub fn schema_for_test() -> MemtableSchema { } pub fn new_memtable() -> MemtableRef { - DefaultMemtableBuilder {}.build(schema_for_test()) + DefaultMemtableBuilder {}.build(1, schema_for_test()) } diff --git a/src/storage/build.rs b/src/storage/build.rs new file mode 100644 index 000000000000..014d1dfe5c2b --- /dev/null +++ b/src/storage/build.rs @@ -0,0 +1,5 @@ +fn main() { + tonic_build::configure() + .compile(&["proto/wal.proto"], &["."]) + .expect("compile wal proto"); +} diff --git a/src/storage/proto/wal.proto b/src/storage/proto/wal.proto new file mode 100644 index 000000000000..8fa4bc530ed0 --- /dev/null +++ b/src/storage/proto/wal.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package greptime.storage.wal.v1; + +message WalHeader { + PayloadType payload_type = 1; + uint64 last_manifest_version = 2; + repeated MutationExtra mutation_extras = 3; +} + +enum PayloadType { + NONE = 0; + WRITE_BATCH_ARROW = 1; + WRITE_BATCH_PROTO = 2; +} + +message MutationExtra { + MutationType mutation_type = 1; + bytes column_null_mask = 2; +} + +enum MutationType { + PUT = 0; + DELETE = 1; +} diff --git a/src/storage/src/arrow_stream.rs b/src/storage/src/arrow_stream.rs new file mode 100644 index 000000000000..cbd9b39030e2 --- /dev/null +++ b/src/storage/src/arrow_stream.rs @@ -0,0 +1,225 @@ +//! Forked from [arrow2](https://github.com/jorgecarleitao/arrow2/blob/v0.10.1/src/io/ipc/read/stream.rs), +//! 
and I made a slight change because arrow2 can only use the same schema to read all data chunks, +//! which doesn't solve the none column problem, so I added a `column_null_mask` parameter to the +//! `StreamReader#maybe_next` method to solve the none column problem. +use std::io::Read; + +use arrow_format::{self, ipc::planus::ReadAsRoot}; +use datatypes::arrow::{ + datatypes::Schema, + error::{ArrowError, Result}, + io::ipc::{ + read::{read_dictionary, read_record_batch, Dictionaries, StreamMetadata, StreamState}, + IpcSchema, + }, +}; + +const CONTINUATION_MARKER: [u8; 4] = [0xff; 4]; + +pub struct ArrowStreamReader<R: Read> { + reader: R, + metadata: StreamMetadata, + dictionaries: Dictionaries, + finished: bool, + data_buffer: Vec<u8>, + message_buffer: Vec<u8>, +} + +impl<R: Read> ArrowStreamReader<R> { + pub fn new(reader: R, metadata: StreamMetadata) -> Self { + Self { + reader, + metadata, + dictionaries: Default::default(), + finished: false, + data_buffer: vec![], + message_buffer: vec![], + } + } + + /// Return the schema of the stream + pub fn metadata(&self) -> &StreamMetadata { + &self.metadata + } + + /// Check if the stream is finished + pub fn is_finished(&self) -> bool { + self.finished + } + + /// Check if the stream is exactly finished + pub fn check_exactly_finished(&mut self) -> Result<bool> { + if self.is_finished() { + return Ok(false); + } + + let _ = self.maybe_next(&[])?; + + Ok(self.is_finished()) + } + + pub fn maybe_next(&mut self, column_null_mask: &[u8]) -> Result<Option<StreamState>> { + if self.finished { + return Ok(None); + } + + let batch = if column_null_mask.is_empty() { + read_next( + &mut self.reader, + &self.metadata, + &mut self.dictionaries, + &mut self.message_buffer, + &mut self.data_buffer, + )? + } else { + read_next( + &mut self.reader, + &valid_metadata(&self.metadata, column_null_mask), + &mut self.dictionaries, + &mut self.message_buffer, + &mut self.data_buffer, + )? 
+ }; + + if batch.is_none() { + self.finished = true; + } + + Ok(batch) + } +} + +fn valid_metadata(metadata: &StreamMetadata, column_null_mask: &[u8]) -> StreamMetadata { + let column_null_mask = bit_vec::BitVec::from_bytes(column_null_mask); + + let schema = Schema::from( + metadata + .schema + .fields + .iter() + .zip(&column_null_mask) + .filter(|(_, mask)| !*mask) + .map(|(field, _)| field.clone()) + .collect::<Vec<_>>(), + ) + .with_metadata(metadata.schema.metadata.clone()); + + let ipc_schema = IpcSchema { + fields: metadata + .ipc_schema + .fields + .iter() + .zip(&column_null_mask) + .filter(|(_, mask)| !*mask) + .map(|(ipc_field, _)| ipc_field.clone()) + .collect::<Vec<_>>(), + is_little_endian: metadata.ipc_schema.is_little_endian, + }; + + StreamMetadata { + schema, + version: metadata.version, + ipc_schema, + } +} + +fn read_next<R: Read>( + reader: &mut R, + metadata: &StreamMetadata, + dictionaries: &mut Dictionaries, + message_buffer: &mut Vec<u8>, + data_buffer: &mut Vec<u8>, +) -> Result<Option<StreamState>> { + // determine metadata length + let mut meta_length: [u8; 4] = [0; 4]; + + match reader.read_exact(&mut meta_length) { + Ok(()) => (), + Err(e) => { + return if e.kind() == std::io::ErrorKind::UnexpectedEof { + // Handle EOF without the "0xFFFFFFFF 0x00000000" + // valid according to: + // https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format + Ok(Some(StreamState::Waiting)) + } else { + Err(ArrowError::from(e)) + }; + } + } + + let meta_length = { + // If a continuation marker is encountered, skip over it and read + // the size from the next four bytes. + if meta_length == CONTINUATION_MARKER { + reader.read_exact(&mut meta_length)?; + } + i32::from_le_bytes(meta_length) as usize + }; + + if meta_length == 0 { + // the stream has ended, mark the reader as finished + return Ok(None); + } + + message_buffer.clear(); + message_buffer.resize(meta_length, 0); + reader.read_exact(message_buffer)?; + + let message = arrow_format::ipc::MessageRef::read_as_root(message_buffer).map_err(|err| { + ArrowError::OutOfSpec(format!("Unable to get root as message: {:?}", err)) + })?; + let header = message.header()?.ok_or_else(|| { + ArrowError::OutOfSpec( + "IPC: unable to fetch the message header. The file or stream is corrupted.".to_string(), + ) + })?; + + match header { + arrow_format::ipc::MessageHeaderRef::Schema(_) => { + Err(ArrowError::OutOfSpec("A stream ".to_string())) + } + arrow_format::ipc::MessageHeaderRef::RecordBatch(batch) => { + // read the block that makes up the record batch into a buffer + data_buffer.clear(); + data_buffer.resize(message.body_length()? as usize, 0); + reader.read_exact(data_buffer)?; + + let mut reader = std::io::Cursor::new(data_buffer); + + read_record_batch( + batch, + &metadata.schema.fields, + &metadata.ipc_schema, + None, + dictionaries, + metadata.version, + &mut reader, + 0, + ) + .map(|x| Some(StreamState::Some(x))) + } + arrow_format::ipc::MessageHeaderRef::DictionaryBatch(batch) => { + // read the block that makes up the dictionary batch into a buffer + let mut buf = vec![0; message.body_length()? 
as usize];
+            reader.read_exact(&mut buf)?;
+
+            let mut dict_reader = std::io::Cursor::new(buf);
+
+            read_dictionary(
+                batch,
+                &metadata.schema.fields,
+                &metadata.ipc_schema,
+                dictionaries,
+                &mut dict_reader,
+                0,
+            )?;
+
+            // read the next message until we encounter a RecordBatch message
+            read_next(reader, metadata, dictionaries, message_buffer, data_buffer)
+        }
+        t => Err(ArrowError::OutOfSpec(format!(
+            "Reading types other than record batches not yet supported, unable to read {:?} ",
+            t
+        ))),
+    }
+}
diff --git a/src/storage/src/background.rs b/src/storage/src/background.rs
new file mode 100644
index 000000000000..4329d3eb062e
--- /dev/null
+++ b/src/storage/src/background.rs
@@ -0,0 +1,104 @@
+//! Background job management.
+
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use common_runtime::{self, JoinHandle};
+use snafu::ResultExt;
+
+use crate::error::{self, Result};
+
+/// Background job context.
+#[derive(Clone, Default)]
+pub struct Context {
+    inner: Arc<ContextInner>,
+}
+
+impl Context {
+    fn new() -> Context {
+        Context::default()
+    }
+
+    /// Marks this context as cancelled.
+    ///
+    /// Jobs accessing this context should check `is_cancelled()` and exit if it
+    /// returns true.
+    pub fn cancel(&self) {
+        self.inner.cancelled.store(true, Ordering::Relaxed);
+    }
+
+    /// Returns true if this context is cancelled.
+    pub fn is_cancelled(&self) -> bool {
+        self.inner.cancelled.load(Ordering::Relaxed)
+    }
+}
+
+#[derive(Default)]
+struct ContextInner {
+    cancelled: AtomicBool,
+}
+
+/// Handle to the background job.
+pub struct JobHandle {
+    ctx: Context,
+    handle: JoinHandle<Result<()>>,
+}
+
+impl JobHandle {
+    /// Waits until this background job is finished.
+    pub async fn join(self) -> Result<()> {
+        self.handle.await.context(error::JoinTaskSnafu)?
+    }
+
+    /// Cancels this background job gracefully and waits until it exits.
+    #[allow(unused)]
+    pub async fn cancel(self) -> Result<()> {
+        // Tokio also provides an [`abort()`](https://docs.rs/tokio/latest/tokio/task/struct.JoinHandle.html#method.abort)
+        // method to abort the current task, consider using it if we need to abort a background job.
+        self.ctx.cancel();
+
+        self.join().await
+    }
+}
+
+#[async_trait]
+pub trait Job: Send {
+    async fn run(&mut self, ctx: &Context) -> Result<()>;
+}
+
+type BoxedJob = Box<dyn Job>;
+
+/// Pool that runs all background jobs.
+#[async_trait]
+pub trait JobPool: Send + Sync {
+    /// Submit a job to run in background.
+    ///
+    /// Returns the [JobHandle] to the job.
+    async fn submit(&self, job: BoxedJob) -> Result<JobHandle>;
+
+    /// Shutdown the pool; pending background jobs may be discarded.
+    async fn shutdown(&self) -> Result<()>;
+}
+
+pub type JobPoolRef = Arc<dyn JobPool>;
+
+pub struct JobPoolImpl {}
+
+#[async_trait]
+impl JobPool for JobPoolImpl {
+    async fn submit(&self, mut job: BoxedJob) -> Result<JobHandle> {
+        // TODO(yingwen): [flush] Schedule background jobs to background workers, controlling parallelism.
+
+        let ctx = Context::new();
+        let job_ctx = ctx.clone();
+        let handle = common_runtime::spawn_bg(async move { job.run(&job_ctx).await });
+
+        Ok(JobHandle { ctx, handle })
+    }
+
+    async fn shutdown(&self) -> Result<()> {
+        // TODO(yingwen): [flush] Stop background workers.
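        // Editor's sketch (not part of this change): once submitted jobs are tracked by the
        // pool, a graceful shutdown could reuse the cancellation protocol above. Assuming a
        // hypothetical `take_all_handles()` that drains the tracked `JobHandle`s:
        //
        //     for handle in self.take_all_handles() {
        //         // `cancel()` sets the shared `Context` flag and then joins the task, so
        //         // jobs that poll `ctx.is_cancelled()` exit cooperatively.
        //         handle.cancel().await?;
        //     }
        //     Ok(())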
+ unimplemented!() + } +} diff --git a/src/storage/src/chunk.rs b/src/storage/src/chunk.rs index 74ad5c390caa..99728285a0ee 100644 --- a/src/storage/src/chunk.rs +++ b/src/storage/src/chunk.rs @@ -2,12 +2,14 @@ use async_trait::async_trait; use store_api::storage::{Chunk, ChunkReader, SchemaRef}; use crate::error::{Error, Result}; -use crate::memtable::BatchIteratorPtr; +use crate::memtable::Batch; + +type IteratorPtr = Box<dyn Iterator<Item = Result<Batch>> + Send>; pub struct ChunkReaderImpl { schema: SchemaRef, - // Now we only read data from one memtable, so we just holds the memtable iterator here. - iter: BatchIteratorPtr, + // Now we only read data from memtables, so we just holds the iterator here. + iter: IteratorPtr, } #[async_trait] @@ -19,8 +21,8 @@ impl ChunkReader for ChunkReaderImpl { } async fn next_chunk(&mut self) -> Result<Option<Chunk>> { - let mut batch = match self.iter.next()? { - Some(b) => b, + let mut batch = match self.iter.next() { + Some(b) => b?, None => return Ok(None), }; @@ -35,7 +37,7 @@ impl ChunkReader for ChunkReaderImpl { } impl ChunkReaderImpl { - pub fn new(schema: SchemaRef, iter: BatchIteratorPtr) -> ChunkReaderImpl { + pub fn new(schema: SchemaRef, iter: IteratorPtr) -> ChunkReaderImpl { ChunkReaderImpl { schema, iter } } } diff --git a/src/storage/src/codec.rs b/src/storage/src/codec.rs new file mode 100644 index 000000000000..3a99b4a85e32 --- /dev/null +++ b/src/storage/src/codec.rs @@ -0,0 +1,19 @@ +use common_error::prelude::ErrorExt; + +pub trait Encoder { + /// The type that is decoded. + type Item; + type Error: ErrorExt; + + /// Encodes a message into the bytes buffer. + fn encode(&self, item: &Self::Item, dst: &mut Vec<u8>) -> Result<(), Self::Error>; +} + +pub trait Decoder { + /// The type that is decoded. + type Item; + type Error: ErrorExt; + + /// Decodes a message from the bytes buffer. + fn decode(&self, src: &[u8]) -> Result<Option<Self::Item>, Self::Error>; +} diff --git a/src/storage/src/config.rs b/src/storage/src/config.rs new file mode 100644 index 000000000000..6294095aa266 --- /dev/null +++ b/src/storage/src/config.rs @@ -0,0 +1,56 @@ +//! 
Engine config +#[derive(Debug, Clone)] +pub struct FileStoreConfig { + /// Storage path + pub store_dir: String, +} + +impl Default for FileStoreConfig { + fn default() -> Self { + Self { + store_dir: "/tmp/greptimedb/".to_string(), + } + } +} + +#[derive(Debug, Clone)] +pub enum ObjectStoreConfig { + File(FileStoreConfig), +} + +impl Default for ObjectStoreConfig { + fn default() -> Self { + ObjectStoreConfig::File(FileStoreConfig::default()) + } +} + +#[derive(Debug, Clone, Default)] +pub struct EngineConfig { + pub store_config: ObjectStoreConfig, +} + +impl EngineConfig { + pub fn with_store_dir(store_dir: &str) -> Self { + Self { + store_config: ObjectStoreConfig::File(FileStoreConfig { + store_dir: store_dir.to_string(), + }), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_engine_config() { + let engine_config = EngineConfig::default(); + + let store_dir = match &engine_config.store_config { + ObjectStoreConfig::File(file) => &file.store_dir, + }; + + assert_eq!("/tmp/greptimedb/", store_dir); + } +} diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs index abb4d3a8afca..0356bcd322e2 100644 --- a/src/storage/src/engine.rs +++ b/src/storage/src/engine.rs @@ -3,28 +3,46 @@ use std::sync::{Arc, RwLock}; use async_trait::async_trait; use common_telemetry::logging::info; +use object_store::{backend::fs::Backend, util, ObjectStore}; use snafu::ResultExt; -use store_api::storage::{EngineContext, RegionDescriptor, StorageEngine}; +use store_api::{ + logstore::LogStore, + manifest::Manifest, + storage::{EngineContext, RegionDescriptor, StorageEngine}, +}; +use crate::config::{EngineConfig, ObjectStoreConfig}; use crate::error::{self, Error, Result}; +use crate::manifest::action::*; +use crate::manifest::region::RegionManifest; +use crate::metadata::RegionMetadata; use crate::region::RegionImpl; +use crate::sst::FsAccessLayer; +use crate::wal::Wal; /// [StorageEngine] implementation. 
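// Editor's sketch (illustrative, not part of this diff): with the engine now generic over a
// `LogStore`, construction and region creation look roughly like the test at the bottom of this
// file, where `log_store` is any `LogStore` implementation, `ctx` an `EngineContext` and `desc`
// a `RegionDescriptor`:
//
//     let config = EngineConfig::with_store_dir("/tmp/greptimedb/");
//     let engine = EngineImpl::new(config, Arc::new(log_store)).await?;
//     let region = engine.create_region(&ctx, desc).await?;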
-#[derive(Clone)] -pub struct EngineImpl { - inner: Arc<EngineInner>, +pub struct EngineImpl<S: LogStore> { + inner: Arc<EngineInner<S>>, +} + +impl<S: LogStore> Clone for EngineImpl<S> { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } } #[async_trait] -impl StorageEngine for EngineImpl { +impl<S: LogStore> StorageEngine for EngineImpl<S> { type Error = Error; - type Region = RegionImpl; + type Region = RegionImpl<S>; - async fn open_region(&self, _ctx: &EngineContext, _name: &str) -> Result<RegionImpl> { + async fn open_region(&self, _ctx: &EngineContext, _name: &str) -> Result<Self::Region> { unimplemented!() } - async fn close_region(&self, _ctx: &EngineContext, _region: RegionImpl) -> Result<()> { + async fn close_region(&self, _ctx: &EngineContext, _region: Self::Region) -> Result<()> { unimplemented!() } @@ -32,42 +50,85 @@ impl StorageEngine for EngineImpl { &self, _ctx: &EngineContext, descriptor: RegionDescriptor, - ) -> Result<RegionImpl> { + ) -> Result<Self::Region> { self.inner.create_region(descriptor).await } - async fn drop_region(&self, _ctx: &EngineContext, _region: RegionImpl) -> Result<()> { + async fn drop_region(&self, _ctx: &EngineContext, _region: Self::Region) -> Result<()> { unimplemented!() } - fn get_region(&self, _ctx: &EngineContext, name: &str) -> Result<Option<RegionImpl>> { + fn get_region(&self, _ctx: &EngineContext, name: &str) -> Result<Option<Self::Region>> { Ok(self.inner.get_region(name)) } } -impl EngineImpl { - pub fn new() -> EngineImpl { - EngineImpl { - inner: Arc::new(EngineInner::default()), - } +impl<S: LogStore> EngineImpl<S> { + pub async fn new(config: EngineConfig, log_store: Arc<S>) -> Result<Self> { + Ok(Self { + inner: Arc::new(EngineInner::new(config, log_store).await?), + }) } } -impl Default for EngineImpl { - fn default() -> Self { - Self::new() +/// Engine share data +/// TODO(dennis): merge to EngineInner? 
+#[derive(Clone, Debug)] +struct SharedData { + pub _config: EngineConfig, + pub object_store: ObjectStore, +} + +impl SharedData { + async fn new(config: EngineConfig) -> Result<Self> { + // TODO(dennis): supports other backend + let store_dir = util::normalize_dir(match &config.store_config { + ObjectStoreConfig::File(file) => &file.store_dir, + }); + + let accessor = Backend::build() + .root(&store_dir) + .finish() + .await + .context(error::InitBackendSnafu { dir: &store_dir })?; + + let object_store = ObjectStore::new(accessor); + + Ok(Self { + _config: config, + object_store, + }) + } + + #[inline] + fn region_sst_dir(&self, region_name: &str) -> String { + format!("{}/", region_name) + } + + #[inline] + fn region_manifest_dir(&self, region_name: &str) -> String { + format!("{}/manifest/", region_name) } } -type RegionMap = HashMap<String, RegionImpl>; +type RegionMap<S> = HashMap<String, RegionImpl<S>>; -#[derive(Default)] -struct EngineInner { - regions: RwLock<RegionMap>, +struct EngineInner<S: LogStore> { + log_store: Arc<S>, + regions: RwLock<RegionMap<S>>, + shared: SharedData, } -impl EngineInner { - async fn create_region(&self, descriptor: RegionDescriptor) -> Result<RegionImpl> { +impl<S: LogStore> EngineInner<S> { + pub async fn new(config: EngineConfig, log_store: Arc<S>) -> Result<Self> { + Ok(Self { + log_store, + regions: RwLock::new(Default::default()), + shared: SharedData::new(config).await?, + }) + } + + async fn create_region(&self, descriptor: RegionDescriptor) -> Result<RegionImpl<S>> { { let regions = self.regions.read().unwrap(); if let Some(region) = regions.get(&descriptor.name) { @@ -75,13 +136,38 @@ impl EngineInner { } } + let region_id = descriptor.id; let region_name = descriptor.name.clone(); - let metadata = descriptor - .try_into() - .context(error::InvalidRegionDescSnafu { - region: &region_name, - })?; - let region = RegionImpl::new(region_name.clone(), metadata); + let metadata: RegionMetadata = + descriptor + .try_into() + .context(error::InvalidRegionDescSnafu { + region: &region_name, + })?; + let wal = Wal::new(region_id, region_name.clone(), self.log_store.clone()); + let sst_dir = &self.shared.region_sst_dir(&region_name); + let sst_layer = Arc::new(FsAccessLayer::new( + sst_dir, + self.shared.object_store.clone(), + )); + let manifest_dir = self.shared.region_manifest_dir(&region_name); + let manifest = + RegionManifest::new(region_id, &manifest_dir, self.shared.object_store.clone()); + + let region = RegionImpl::new( + region_id, + region_name.clone(), + metadata.clone(), + wal, + sst_layer, + manifest.clone(), + ); + // Persist region metadata + manifest + .update(RegionMetaAction::Change(RegionChange { + metadata: Arc::new(metadata), + })) + .await?; { let mut regions = self.regions.write().unwrap(); @@ -91,7 +177,6 @@ impl EngineInner { regions.insert(region_name.clone(), region.clone()); } - // TODO(yingwen): Persist region metadata to log. // TODO(yingwen): Impl Debug format for region and print region info briefly in log. 
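        // Editor's note: at this point the `RegionChange` action has been persisted through the
        // region manifest and the region has been inserted into the in-memory region map, so its
        // metadata can be loaded back from the manifest later.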
info!("Storage engine create region {}", region_name); @@ -99,7 +184,7 @@ impl EngineInner { Ok(region) } - fn get_region(&self, name: &str) -> Option<RegionImpl> { + fn get_region(&self, name: &str) -> Option<RegionImpl<S>> { self.regions.read().unwrap().get(name).cloned() } } @@ -107,14 +192,22 @@ impl EngineInner { #[cfg(test)] mod tests { use datatypes::type_id::LogicalTypeId; + use log_store::test_util::log_store_util; use store_api::storage::Region; + use tempdir::TempDir; use super::*; use crate::test_util::descriptor_util::RegionDescBuilder; #[tokio::test] async fn test_create_new_region() { - let engine = EngineImpl::new(); + let (log_store, _tmp) = + log_store_util::create_tmp_local_file_log_store("test_engine_wal").await; + let dir = TempDir::new("test_create_new_region").unwrap(); + let store_dir = dir.path().to_string_lossy(); + let config = EngineConfig::with_store_dir(&store_dir); + + let engine = EngineImpl::new(config, Arc::new(log_store)).await.unwrap(); let region_name = "region-0"; let desc = RegionDescBuilder::new(region_name) diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs index da7a3e31890c..937a5455806e 100644 --- a/src/storage/src/error.rs +++ b/src/storage/src/error.rs @@ -1,6 +1,11 @@ use std::any::Any; +use std::io::Error as IoError; +use std::str::Utf8Error; use common_error::prelude::*; +use datatypes::arrow; +use serde_json::error::Error as JsonError; +use store_api::manifest::ManifestVersion; use crate::metadata::Error as MetadataError; @@ -25,6 +30,118 @@ pub enum Error { column: String, backtrace: Backtrace, }, + + #[snafu(display("Missing timestamp in write batch"))] + BatchMissingTimestamp { backtrace: Backtrace }, + + #[snafu(display("Failed to write columns, source: {}", source))] + FlushIo { + source: std::io::Error, + backtrace: Backtrace, + }, + + #[snafu(display("Failed to init backend, source: {}", source))] + InitBackend { + dir: String, + source: std::io::Error, + backtrace: Backtrace, + }, + + #[snafu(display("Failed to write parquet file, source: {}", source))] + WriteParquet { + source: arrow::error::ArrowError, + backtrace: Backtrace, + }, + + #[snafu(display("Fail to read object from path: {}, source: {}", path, source))] + ReadObject { + path: String, + backtrace: Backtrace, + source: IoError, + }, + + #[snafu(display("Fail to write object into path: {}, source: {}", path, source))] + WriteObject { + path: String, + backtrace: Backtrace, + source: IoError, + }, + + #[snafu(display("Fail to delete object from path: {}, source: {}", path, source))] + DeleteObject { + path: String, + backtrace: Backtrace, + source: IoError, + }, + + #[snafu(display("Fail to list objects in path: {}, source: {}", path, source))] + ListObjects { + path: String, + backtrace: Backtrace, + source: IoError, + }, + + #[snafu(display("Fail to create str from bytes, source: {}", source))] + Utf8 { + backtrace: Backtrace, + source: Utf8Error, + }, + + #[snafu(display("Fail to encode object into json , source: {}", source))] + EncodeJson { + backtrace: Backtrace, + source: JsonError, + }, + + #[snafu(display("Fail to decode object from json , source: {}", source))] + DecodeJson { + backtrace: Backtrace, + source: JsonError, + }, + + #[snafu(display("Invalid scan index, start: {}, end: {}", start, end))] + InvalidScanIndex { + start: ManifestVersion, + end: ManifestVersion, + backtrace: Backtrace, + }, + + #[snafu(display( + "Failed to write WAL, region id: {}, WAL name: {}, source: {}", + region_id, + name, + source + ))] + WriteWal { + region_id: u32, 
+ name: String, + #[snafu(backtrace)] + source: BoxedError, + }, + + #[snafu(display("Failed to encode WAL header, source {}", source))] + EncodeWalHeader { + backtrace: Backtrace, + source: std::io::Error, + }, + + #[snafu(display("Failed to decode WAL header, source {}", source))] + DecodeWalHeader { + backtrace: Backtrace, + source: std::io::Error, + }, + + #[snafu(display("Failed to join task, source: {}", source))] + JoinTask { + source: common_runtime::JoinError, + backtrace: Backtrace, + }, + + #[snafu(display("Invalid timestamp in write batch, source: {}", source))] + InvalidTimestamp { source: crate::write_batch::Error }, + + #[snafu(display("Task already cancelled"))] + Cancelled { backtrace: Backtrace }, } pub type Result<T> = std::result::Result<T, Error>; @@ -34,9 +151,29 @@ impl ErrorExt for Error { use Error::*; match self { - InvalidRegionDesc { .. } | InvalidInputSchema { .. } | BatchMissingColumn { .. } => { - StatusCode::InvalidArguments - } + InvalidScanIndex { .. } + | InvalidRegionDesc { .. } + | InvalidInputSchema { .. } + | BatchMissingColumn { .. } + | BatchMissingTimestamp { .. } + | InvalidTimestamp { .. } => StatusCode::InvalidArguments, + + Utf8 { .. } + | EncodeJson { .. } + | DecodeJson { .. } + | JoinTask { .. } + | Cancelled { .. } => StatusCode::Unexpected, + + FlushIo { .. } + | InitBackend { .. } + | WriteParquet { .. } + | ReadObject { .. } + | WriteObject { .. } + | ListObjects { .. } + | DeleteObject { .. } + | WriteWal { .. } + | DecodeWalHeader { .. } + | EncodeWalHeader { .. } => StatusCode::StorageUnavailable, } } @@ -51,6 +188,9 @@ impl ErrorExt for Error { #[cfg(test)] mod tests { + + use common_error::prelude::StatusCode::*; + use datatypes::arrow::error::ArrowError; use snafu::GenerateImplicitData; use super::*; @@ -72,4 +212,32 @@ mod tests { assert_eq!(StatusCode::InvalidArguments, err.status_code()); assert!(err.backtrace_opt().is_some()); } + + #[test] + pub fn test_flush_error() { + fn throw_io_error() -> std::result::Result<(), std::io::Error> { + Err(std::io::Error::new( + std::io::ErrorKind::UnexpectedEof, + "writer is closed", + )) + } + + let error = throw_io_error().context(FlushIoSnafu).err().unwrap(); + assert_eq!(StatusCode::StorageUnavailable, error.status_code()); + assert!(error.backtrace_opt().is_some()); + } + + #[test] + pub fn test_arrow_error() { + fn throw_arrow_error() -> std::result::Result<(), ArrowError> { + Err(ArrowError::ExternalFormat("Lorem ipsum".to_string())) + } + + let error = throw_arrow_error() + .context(WriteParquetSnafu) + .err() + .unwrap(); + assert_eq!(StorageUnavailable, error.status_code()); + assert!(error.backtrace_opt().is_some()); + } } diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs new file mode 100644 index 000000000000..4d0bb5575d00 --- /dev/null +++ b/src/storage/src/flush.rs @@ -0,0 +1,264 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use common_telemetry::logging; +use common_time::RangeMillis; +use store_api::logstore::LogStore; +use store_api::manifest::Manifest; +use store_api::manifest::ManifestVersion; +use store_api::storage::SequenceNumber; +use uuid::Uuid; + +use crate::background::{Context, Job, JobHandle, JobPoolRef}; +use crate::error::{CancelledSnafu, Result}; +use crate::manifest::action::*; +use crate::manifest::region::RegionManifest; +use crate::memtable::{IterContext, MemtableId, MemtableRef}; +use crate::region::RegionWriterRef; +use crate::region::SharedDataRef; +use crate::sst::{AccessLayerRef, FileMeta, WriteOptions}; +use 
crate::version::VersionEdit; +use crate::wal::Wal; + +/// Default write buffer size (32M). +const DEFAULT_WRITE_BUFFER_SIZE: usize = 32 * 1024 * 1024; + +pub trait FlushStrategy: Send + Sync { + fn should_flush( + &self, + shared: &SharedDataRef, + bytes_mutable: usize, + bytes_total: usize, + ) -> bool; +} + +pub type FlushStrategyRef = Arc<dyn FlushStrategy>; + +#[derive(Debug)] +pub struct SizeBasedStrategy { + /// Write buffer size of memtable. + max_write_buffer_size: usize, + /// Mutable memtable memory size limitation + mutable_limitation: usize, +} + +#[inline] +fn get_mutable_limitation(max_write_buffer_size: usize) -> usize { + // Inspired by RocksDB + // https://github.com/facebook/rocksdb/blob/main/include/rocksdb/write_buffer_manager.h#L86 + max_write_buffer_size * 7 / 8 +} + +impl Default for SizeBasedStrategy { + fn default() -> Self { + let max_write_buffer_size = DEFAULT_WRITE_BUFFER_SIZE; + Self { + max_write_buffer_size, + mutable_limitation: get_mutable_limitation(max_write_buffer_size), + } + } +} + +impl FlushStrategy for SizeBasedStrategy { + fn should_flush( + &self, + shared: &SharedDataRef, + bytes_mutable: usize, + bytes_total: usize, + ) -> bool { + // Insipired by RocksDB flush strategy + // https://github.com/facebook/rocksdb/blob/main/include/rocksdb/write_buffer_manager.h#L94 + + if bytes_mutable > self.mutable_limitation { + logging::info!( + "Region should flush, region: {}, bytes_mutable: {}, mutable_limitation: {}, \ + bytes_total: {}, max_write_buffer_size: {} .", + shared.name, + bytes_mutable, + self.mutable_limitation, + bytes_total, + self.max_write_buffer_size + ); + + return true; + } + + let buffer_size = self.max_write_buffer_size; + + // If the memory exceeds the buffer size, we trigger more aggressive + // flush. But if already more than half memory is being flushed, + // triggering more flush may not help. We will hold it instead. + let should_flush = bytes_total >= buffer_size && bytes_mutable >= buffer_size / 2; + + if should_flush { + logging::info!( + "Region should flush, region: {}, bytes_mutable: {}, mutable_limitation: {}, \ + bytes_total: {}, max_write_buffer_size: {} .", + shared.name, + bytes_mutable, + self.mutable_limitation, + bytes_total, + buffer_size + ); + } + + should_flush + } +} + +#[derive(Debug)] +pub struct MemtableWithMeta { + pub memtable: MemtableRef, + pub bucket: RangeMillis, +} + +#[async_trait] +pub trait FlushScheduler: Send + Sync { + async fn schedule_flush(&self, flush_job: Box<dyn Job>) -> Result<JobHandle>; +} + +pub struct FlushSchedulerImpl { + job_pool: JobPoolRef, +} + +impl FlushSchedulerImpl { + pub fn new(job_pool: JobPoolRef) -> FlushSchedulerImpl { + FlushSchedulerImpl { job_pool } + } +} + +#[async_trait] +impl FlushScheduler for FlushSchedulerImpl { + async fn schedule_flush(&self, flush_job: Box<dyn Job>) -> Result<JobHandle> { + // TODO(yingwen): [flush] Implements flush schedule strategy, controls max background flushes. + self.job_pool.submit(flush_job).await + } +} + +pub type FlushSchedulerRef = Arc<dyn FlushScheduler>; + +pub struct FlushJob<S: LogStore> { + /// Max memtable id in these memtables, + /// used to remove immutable memtables in current version. + pub max_memtable_id: MemtableId, + /// Memtables to be flushed. + pub memtables: Vec<MemtableWithMeta>, + /// Last sequence of data to be flushed. + pub flush_sequence: SequenceNumber, + /// Shared data of region to be flushed. + pub shared: SharedDataRef, + /// Sst access layer of the region. 
+ pub sst_layer: AccessLayerRef, + /// Region writer, used to persist log entry that points to the latest manifest file. + pub writer: RegionWriterRef, + /// Region write-ahead logging, used to write data/meta to the log file. + pub wal: Wal<S>, + /// Region manifest service, used to persist metadata. + pub manifest: RegionManifest, +} + +impl<S: LogStore> FlushJob<S> { + async fn write_memtables_to_layer(&self, ctx: &Context) -> Result<Vec<FileMeta>> { + if ctx.is_cancelled() { + return CancelledSnafu {}.fail(); + } + + let mut futures = Vec::with_capacity(self.memtables.len()); + for m in &self.memtables { + let file_name = Self::generate_sst_file_name(); + // TODO(hl): Check if random file name already exists in meta. + + let iter_ctx = IterContext { + for_flush: true, + ..Default::default() + }; + + let iter = m.memtable.iter(iter_ctx)?; + futures.push(async move { + self.sst_layer + .write_sst(&file_name, iter, WriteOptions::default()) + .await + }); + } + + let metas = futures_util::future::join_all(futures) + .await + .into_iter() + .collect::<Result<Vec<_>>>()? + .into_iter() + .map(|f| FileMeta { + file_path: f, + level: 0, + }) + .collect(); + + logging::info!("Successfully flush memtables to files: {:?}", metas); + Ok(metas) + } + + async fn write_to_manifest(&self, file_metas: &[FileMeta]) -> Result<ManifestVersion> { + let edit = RegionEdit { + region_id: self.shared.id, + region_version: self.shared.version_control.metadata().version, + flush_sequence: self.flush_sequence, + files_to_add: file_metas.to_vec(), + files_to_remove: Vec::default(), + }; + logging::debug!("Write region edit: {:?} to manifest.", edit); + self.manifest.update(RegionMetaAction::Edit(edit)).await + } + + /// Generates random SST file name in format: `^[a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}.parquet$` + fn generate_sst_file_name() -> String { + format!("{}.parquet", Uuid::new_v4().hyphenated()) + } +} + +#[async_trait] +impl<S: LogStore> Job for FlushJob<S> { + // TODO(yingwen): [flush] Support in-job parallelism (Flush memtables concurrently) + async fn run(&mut self, ctx: &Context) -> Result<()> { + let file_metas = self.write_memtables_to_layer(ctx).await?; + + let manifest_version = self.write_to_manifest(&file_metas).await?; + + let edit = VersionEdit { + files_to_add: file_metas, + flushed_sequence: Some(self.flush_sequence), + manifest_version, + max_memtable_id: Some(self.max_memtable_id), + }; + + self.writer + .apply_version_edit(&self.wal, edit, &self.shared) + .await?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use log_store::fs::noop::NoopLogStore; + use regex::Regex; + + use super::*; + + #[test] + fn test_get_mutable_limitation() { + assert_eq!(7, get_mutable_limitation(8)); + assert_eq!(8, get_mutable_limitation(10)); + assert_eq!(56, get_mutable_limitation(64)); + } + + #[test] + pub fn test_uuid_generate() { + let file_name = FlushJob::<NoopLogStore>::generate_sst_file_name(); + let regex = Regex::new(r"^[a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}.parquet$").unwrap(); + assert!( + regex.is_match(&file_name), + "illegal sst file name: {}", + file_name + ); + } +} diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs index 9fd1437c309c..e3a7fb8ef3e4 100644 --- a/src/storage/src/lib.rs +++ b/src/storage/src/lib.rs @@ -1,17 +1,24 @@ //! Storage engine implementation. 
- +mod arrow_stream; +mod background; mod chunk; +mod codec; +pub mod config; mod engine; -mod error; +pub mod error; +mod flush; +pub mod manifest; pub mod memtable; pub mod metadata; +mod proto; mod region; mod snapshot; +mod sst; pub mod sync; -mod version; -mod write_batch; - #[cfg(test)] mod test_util; +mod version; +mod wal; +mod write_batch; pub use engine::EngineImpl; diff --git a/src/storage/src/manifest.rs b/src/storage/src/manifest.rs new file mode 100644 index 000000000000..0db6375124a6 --- /dev/null +++ b/src/storage/src/manifest.rs @@ -0,0 +1,5 @@ +//! manifest storage +pub(crate) mod action; +pub(crate) mod checkpoint; +pub mod region; +pub(crate) mod storage; diff --git a/src/storage/src/manifest/action.rs b/src/storage/src/manifest/action.rs new file mode 100644 index 000000000000..2826231c22b7 --- /dev/null +++ b/src/storage/src/manifest/action.rs @@ -0,0 +1,67 @@ +use serde::{Deserialize, Serialize}; +use serde_json as json; +use snafu::ResultExt; +use store_api::manifest::MetaAction; +use store_api::manifest::Metadata; +use store_api::storage::RegionId; +use store_api::storage::SequenceNumber; + +use crate::error::{DecodeJsonSnafu, EncodeJsonSnafu, Result, Utf8Snafu}; +use crate::metadata::{RegionMetadataRef, VersionNumber}; +use crate::sst::FileMeta; + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct RegionChange { + pub metadata: RegionMetadataRef, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct RegionRemove { + pub region_id: RegionId, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct RegionEdit { + pub region_id: RegionId, + pub region_version: VersionNumber, + pub flush_sequence: SequenceNumber, + pub files_to_add: Vec<FileMeta>, + pub files_to_remove: Vec<FileMeta>, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct RegionManifestData { + pub region_meta: RegionMetadataRef, + // TODO(dennis): version metadata +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub enum RegionMetaAction { + Change(RegionChange), + Remove(RegionRemove), + Edit(RegionEdit), +} + +impl RegionMetaAction { + pub(crate) fn encode(&self) -> Result<Vec<u8>> { + Ok(json::to_string(self).context(EncodeJsonSnafu)?.into_bytes()) + } + + pub(crate) fn decode(bs: &[u8]) -> Result<Self> { + json::from_str(std::str::from_utf8(bs).context(Utf8Snafu)?).context(DecodeJsonSnafu) + } +} + +impl Metadata for RegionManifestData {} + +impl MetaAction for RegionMetaAction { + type MetadataId = RegionId; + + fn metadata_id(&self) -> RegionId { + match self { + RegionMetaAction::Change(c) => c.metadata.id, + RegionMetaAction::Remove(r) => r.region_id, + RegionMetaAction::Edit(e) => e.region_id, + } + } +} diff --git a/src/storage/src/manifest/checkpoint.rs b/src/storage/src/manifest/checkpoint.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/src/storage/src/manifest/checkpoint.rs @@ -0,0 +1 @@ + diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs new file mode 100644 index 000000000000..1d266326ce5f --- /dev/null +++ b/src/storage/src/manifest/region.rs @@ -0,0 +1,205 @@ +//! 
Region manifest impl +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; + +use async_trait::async_trait; +use common_telemetry::logging; +use object_store::ObjectStore; +use store_api::manifest::*; +use store_api::storage::RegionId; + +use crate::error::{Error, Result}; +use crate::manifest::action::*; +use crate::manifest::storage::ManifestObjectStore; +use crate::manifest::storage::ObjectStoreLogIterator; + +#[derive(Clone)] +pub struct RegionManifest { + inner: Arc<RegionManifestInner>, +} + +#[async_trait] +impl Manifest for RegionManifest { + type Error = Error; + type MetaAction = RegionMetaAction; + type MetadataId = RegionId; + type Metadata = RegionManifestData; + + fn new(id: Self::MetadataId, manifest_dir: &str, object_store: ObjectStore) -> Self { + RegionManifest { + inner: Arc::new(RegionManifestInner::new(id, manifest_dir, object_store)), + } + } + + async fn update(&self, action: RegionMetaAction) -> Result<ManifestVersion> { + self.inner.save(&action).await + } + + async fn load(&self) -> Result<Option<RegionManifestData>> { + let last_version = self.inner.last_version(); + + let start_bound = if last_version == MIN_VERSION { + // No actions have ever saved + MIN_VERSION + } else { + last_version - 1 + }; + + let mut iter = self.inner.scan(start_bound, MAX_VERSION).await?; + + match iter.next_action().await? { + Some((_v, RegionMetaAction::Change(c))) => Ok(Some(RegionManifestData { + region_meta: c.metadata, + })), + Some(_) => todo!(), + None => Ok(None), + } + } + + async fn checkpoint(&self) -> Result<ManifestVersion> { + unimplemented!(); + } + + fn metadata_id(&self) -> RegionId { + self.inner.region_id + } +} + +struct RegionManifestInner { + region_id: RegionId, + store: Arc<ManifestObjectStore>, + version: AtomicU64, +} + +struct RegionMetaActionIterator { + log_iter: ObjectStoreLogIterator, +} + +impl RegionMetaActionIterator { + async fn next_action(&mut self) -> Result<Option<(ManifestVersion, RegionMetaAction)>> { + match self.log_iter.next_log().await? 
{ + Some((v, bytes)) => { + let action: RegionMetaAction = RegionMetaAction::decode(&bytes)?; + Ok(Some((v, action))) + } + None => Ok(None), + } + } +} + +impl RegionManifestInner { + fn new(region_id: RegionId, manifest_dir: &str, object_store: ObjectStore) -> Self { + Self { + region_id, + store: Arc::new(ManifestObjectStore::new(manifest_dir, object_store)), + // TODO(dennis): recover the last version from history + version: AtomicU64::new(0), + } + } + + #[inline] + fn inc_version(&self) -> ManifestVersion { + self.version.fetch_add(1, Ordering::Relaxed) + } + + #[inline] + fn last_version(&self) -> ManifestVersion { + self.version.load(Ordering::Relaxed) + } + + async fn save(&self, action: &RegionMetaAction) -> Result<ManifestVersion> { + let version = self.inc_version(); + + logging::debug!( + "Save region metadata action: {:?}, version: {}", + action, + version + ); + + self.store.save(version, &action.encode()?).await?; + + Ok(version) + } + + async fn scan( + &self, + start: ManifestVersion, + end: ManifestVersion, + ) -> Result<RegionMetaActionIterator> { + Ok(RegionMetaActionIterator { + log_iter: self.store.scan(start, end).await?, + }) + } +} + +#[cfg(test)] +mod tests { + use datatypes::type_id::LogicalTypeId; + use object_store::{backend::fs, ObjectStore}; + use tempdir::TempDir; + + use super::*; + use crate::metadata::RegionMetadata; + use crate::test_util::descriptor_util::RegionDescBuilder; + + #[tokio::test] + async fn test_region_manifest() { + common_telemetry::init_default_ut_logging(); + let tmp_dir = TempDir::new("test_region_manifest").unwrap(); + let object_store = ObjectStore::new( + fs::Backend::build() + .root(&tmp_dir.path().to_string_lossy()) + .finish() + .await + .unwrap(), + ); + let region_id = 0; + + let manifest = RegionManifest::new(region_id, "/manifest/", object_store); + assert_eq!(region_id, manifest.metadata_id()); + + let region_name = "region-0"; + let desc = RegionDescBuilder::new(region_name) + .id(region_id) + .push_key_column(("k1", LogicalTypeId::Int32, false)) + .push_value_column(("v1", LogicalTypeId::Float32, true)) + .build(); + let metadata: RegionMetadata = desc.try_into().unwrap(); + let region_meta = Arc::new(metadata); + + assert!(manifest.load().await.unwrap().is_none()); + + manifest + .update(RegionMetaAction::Change(RegionChange { + metadata: region_meta.clone(), + })) + .await + .unwrap(); + + let manifest_data = manifest.load().await.unwrap().unwrap(); + assert_eq!(manifest_data.region_meta, region_meta); + + // save another metadata + let region_name = "region-0"; + let desc = RegionDescBuilder::new(region_name) + .id(region_id) + .push_key_column(("k1", LogicalTypeId::Int32, false)) + .push_key_column(("k2", LogicalTypeId::Int64, false)) + .push_value_column(("v1", LogicalTypeId::Float32, true)) + .push_value_column(("bool", LogicalTypeId::Boolean, true)) + .build(); + let metadata: RegionMetadata = desc.try_into().unwrap(); + let region_meta = Arc::new(metadata); + manifest + .update(RegionMetaAction::Change(RegionChange { + metadata: region_meta.clone(), + })) + .await + .unwrap(); + + let manifest_data = manifest.load().await.unwrap().unwrap(); + assert_eq!(manifest_data.region_meta, region_meta); + } +} diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs new file mode 100644 index 000000000000..38936d3908b4 --- /dev/null +++ b/src/storage/src/manifest/storage.rs @@ -0,0 +1,330 @@ +use std::collections::HashMap; +use std::iter::Iterator; + +use async_trait::async_trait; +use 
common_telemetry::logging; +use futures::TryStreamExt; +use lazy_static::lazy_static; +use object_store::{util, DirEntry, ObjectStore}; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use snafu::{ensure, ResultExt}; +use store_api::manifest::{LogIterator, ManifestLogStorage, ManifestVersion}; + +use crate::error::{ + DecodeJsonSnafu, DeleteObjectSnafu, EncodeJsonSnafu, Error, InvalidScanIndexSnafu, + ListObjectsSnafu, ReadObjectSnafu, Result, Utf8Snafu, WriteObjectSnafu, +}; + +lazy_static! { + static ref RE: Regex = Regex::new("^\\d+\\.json$").unwrap(); +} + +const LAST_CHECKPOINT_FILE: &str = "_last_checkpoint"; + +#[inline] +pub fn delta_file(version: ManifestVersion) -> String { + format!("{:020}.json", version) +} + +#[inline] +pub fn checkpoint_file(version: ManifestVersion) -> String { + format!("{:020}.checkpoint", version) +} + +/// Return's the delta file version from path +/// +/// # Panics +/// Panics if the file path is not a valid delta file. +#[inline] +pub fn delta_version(path: &str) -> ManifestVersion { + let s = path.split('.').next().unwrap(); + s.parse() + .unwrap_or_else(|_| panic!("Invalid delta file: {}", path)) +} + +#[inline] +pub fn is_delta_file(file_name: &str) -> bool { + RE.is_match(file_name) +} + +pub struct ObjectStoreLogIterator { + iter: Box<dyn Iterator<Item = (ManifestVersion, DirEntry)> + Send + Sync>, +} + +#[async_trait] +impl LogIterator for ObjectStoreLogIterator { + type Error = Error; + + async fn next_log(&mut self) -> Result<Option<(ManifestVersion, Vec<u8>)>> { + match self.iter.next() { + Some((v, e)) => { + let object = e.into_object(); + let bytes = object.read().await.context(ReadObjectSnafu { + path: object.path(), + })?; + + Ok(Some((v, bytes))) + } + None => Ok(None), + } + } +} + +#[derive(Clone, Debug)] +pub struct ManifestObjectStore { + object_store: ObjectStore, + path: String, +} + +impl ManifestObjectStore { + pub fn new(path: &str, object_store: ObjectStore) -> Self { + Self { + object_store, + path: util::normalize_dir(path), + } + } + + fn delta_file_path(&self, version: ManifestVersion) -> String { + format!("{}{}", self.path, delta_file(version)) + } + + fn checkpoint_file_path(&self, version: ManifestVersion) -> String { + format!("{}{}", self.path, checkpoint_file(version)) + } +} + +#[derive(Serialize, Deserialize, Debug)] +struct CheckpointMetadata { + pub size: usize, + pub version: ManifestVersion, + pub checksum: Option<String>, + pub extend_metadata: Option<HashMap<String, String>>, +} + +impl CheckpointMetadata { + fn encode(&self) -> Result<impl AsRef<[u8]>> { + serde_json::to_string(self).context(EncodeJsonSnafu) + } + + fn decode(bs: &[u8]) -> Result<Self> { + let data = std::str::from_utf8(bs).context(Utf8Snafu)?; + + serde_json::from_str(data).context(DecodeJsonSnafu) + } +} + +#[async_trait] +impl ManifestLogStorage for ManifestObjectStore { + type Error = Error; + type Iter = ObjectStoreLogIterator; + + async fn scan( + &self, + start: ManifestVersion, + end: ManifestVersion, + ) -> Result<ObjectStoreLogIterator> { + ensure!(start <= end, InvalidScanIndexSnafu { start, end }); + + let dir = self.object_store.object(&self.path); + let dir_exists = dir + .is_exist() + .await + .context(ReadObjectSnafu { path: &self.path })?; + + if !dir_exists { + return Ok(ObjectStoreLogIterator { + iter: Box::new(Vec::default().into_iter()), + }); + } + + let streamer = dir + .list() + .await + .context(ListObjectsSnafu { path: &self.path })?; + + let mut entries: Vec<(ManifestVersion, DirEntry)> = streamer + 
.try_filter_map(|e| async move { + let file_name = e.name(); + if is_delta_file(file_name) { + let version = delta_version(file_name); + if version >= start && version < end { + Ok(Some((version, e))) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + .try_collect::<Vec<_>>() + .await + .context(ListObjectsSnafu { path: &self.path })?; + + entries.sort_unstable_by(|(v1, _), (v2, _)| v1.cmp(v2)); + + Ok(ObjectStoreLogIterator { + iter: Box::new(entries.into_iter()), + }) + } + + async fn save(&self, version: ManifestVersion, bytes: &[u8]) -> Result<()> { + let object = self.object_store.object(&self.delta_file_path(version)); + object.write(bytes).await.context(WriteObjectSnafu { + path: object.path(), + })?; + + Ok(()) + } + + async fn delete(&self, start: ManifestVersion, end: ManifestVersion) -> Result<()> { + //TODO(dennis): delete in batch or concurrently? + for v in start..end { + let object = self.object_store.object(&self.delta_file_path(v)); + object.delete().await.context(DeleteObjectSnafu { + path: object.path(), + })?; + } + + Ok(()) + } + + async fn save_checkpoint(&self, version: ManifestVersion, bytes: &[u8]) -> Result<()> { + let object = self + .object_store + .object(&self.checkpoint_file_path(version)); + object.write(bytes).await.context(WriteObjectSnafu { + path: object.path(), + })?; + + let last_checkpoint = self + .object_store + .object(&format!("{}{}", self.path, LAST_CHECKPOINT_FILE)); + + let checkpoint_metadata = CheckpointMetadata { + size: bytes.len(), + version, + checksum: None, + extend_metadata: None, + }; + + logging::debug!( + "Save checkpoint in path: {}, metadata: {:?}", + last_checkpoint.path(), + checkpoint_metadata + ); + + let bs = checkpoint_metadata.encode()?; + last_checkpoint.write(bs).await.context(WriteObjectSnafu { + path: last_checkpoint.path(), + })?; + + Ok(()) + } + + async fn load_checkpoint(&self) -> Result<Option<(ManifestVersion, Vec<u8>)>> { + let last_checkpoint = self + .object_store + .object(&format!("{}{}", self.path, LAST_CHECKPOINT_FILE)); + + let checkpoint_exists = last_checkpoint.is_exist().await.context(ReadObjectSnafu { + path: last_checkpoint.path(), + })?; + + if checkpoint_exists { + let bytes = last_checkpoint.read().await.context(ReadObjectSnafu { + path: last_checkpoint.path(), + })?; + + let checkpoint_metadata = CheckpointMetadata::decode(&bytes)?; + + logging::debug!( + "Load checkpoint in path: {}, metadata: {:?}", + last_checkpoint.path(), + checkpoint_metadata + ); + + let checkpoint = self + .object_store + .object(&self.checkpoint_file_path(checkpoint_metadata.version)); + + Ok(Some(( + checkpoint_metadata.version, + checkpoint.read().await.context(ReadObjectSnafu { + path: checkpoint.path(), + })?, + ))) + } else { + Ok(None) + } + } +} + +#[cfg(test)] +mod tests { + use object_store::{backend::fs, ObjectStore}; + use tempdir::TempDir; + + use super::*; + + #[tokio::test] + async fn test_manifest_log_store() { + common_telemetry::init_default_ut_logging(); + let tmp_dir = TempDir::new("test_manifest_log_store").unwrap(); + let object_store = ObjectStore::new( + fs::Backend::build() + .root(&tmp_dir.path().to_string_lossy()) + .finish() + .await + .unwrap(), + ); + + let log_store = ManifestObjectStore::new("/", object_store); + + for v in 0..5 { + log_store + .save(v, format!("hello, {}", v).as_bytes()) + .await + .unwrap(); + } + + let mut it = log_store.scan(1, 4).await.unwrap(); + for v in 1..4 { + let (version, bytes) = it.next_log().await.unwrap().unwrap(); + assert_eq!(v, version); + 
assert_eq!(format!("hello, {}", v).as_bytes(), bytes); + } + assert!(it.next_log().await.unwrap().is_none()); + + let mut it = log_store.scan(0, 11).await.unwrap(); + for v in 0..5 { + let (version, bytes) = it.next_log().await.unwrap().unwrap(); + assert_eq!(v, version); + assert_eq!(format!("hello, {}", v).as_bytes(), bytes); + } + assert!(it.next_log().await.unwrap().is_none()); + + // Delete [0, 3) + log_store.delete(0, 3).await.unwrap(); + + // [3, 5) remains + let mut it = log_store.scan(0, 11).await.unwrap(); + for v in 3..5 { + let (version, bytes) = it.next_log().await.unwrap().unwrap(); + assert_eq!(v, version); + assert_eq!(format!("hello, {}", v).as_bytes(), bytes); + } + assert!(it.next_log().await.unwrap().is_none()); + + // test checkpoint + assert!(log_store.load_checkpoint().await.unwrap().is_none()); + log_store + .save_checkpoint(3, "checkpoint".as_bytes()) + .await + .unwrap(); + + let (v, checkpoint) = log_store.load_checkpoint().await.unwrap().unwrap(); + assert_eq!(checkpoint, "checkpoint".as_bytes()); + assert_eq!(3, v); + } +} diff --git a/src/storage/src/memtable.rs b/src/storage/src/memtable.rs index 99de24d3634e..0ba18611a968 100644 --- a/src/storage/src/memtable.rs +++ b/src/storage/src/memtable.rs @@ -2,22 +2,27 @@ mod btree; mod inserter; mod schema; #[cfg(test)] -mod tests; +pub mod tests; +mod version; -use std::mem; use std::sync::Arc; use datatypes::vectors::{UInt64Vector, UInt8Vector, VectorRef}; -use snafu::Snafu; use store_api::storage::{consts, SequenceNumber, ValueType}; use crate::error::Result; use crate::memtable::btree::BTreeMemtable; pub use crate::memtable::inserter::Inserter; pub use crate::memtable::schema::MemtableSchema; +pub use crate::memtable::version::{MemtableSet, MemtableVersion}; + +/// Unique id for memtables under same region. +pub type MemtableId = u32; /// In memory storage. -pub trait Memtable: Send + Sync { +pub trait Memtable: Send + Sync + std::fmt::Debug { + fn id(&self) -> MemtableId; + fn schema(&self) -> &MemtableSchema; /// Write key/values to the memtable. @@ -27,7 +32,7 @@ pub trait Memtable: Send + Sync { fn write(&self, kvs: &KeyValues) -> Result<()>; /// Iterates the memtable. - // TODO(yingwen): Consider passing a projector (does column projection). + // TODO(yingwen): 1. Use reference of IterContext? 2. Consider passing a projector (does column projection). fn iter(&self, ctx: IterContext) -> Result<BatchIteratorPtr>; /// Returns the estimated bytes allocated by this memtable from heap. @@ -43,6 +48,11 @@ pub struct IterContext { pub batch_size: usize, /// Max visible sequence (inclusive). pub visible_sequence: SequenceNumber, + + // TODO(yingwen): [flush] Maybe delay deduping and visiblility handling, just returns all rows + // in memtable. + /// Returns all rows, ignores sequence visibility and key duplication. + pub for_flush: bool, } impl Default for IterContext { @@ -51,6 +61,7 @@ impl Default for IterContext { batch_size: consts::READ_BATCH_SIZE, // All data in memory is visible by default. visible_sequence: SequenceNumber::MAX, + for_flush: false, } } } @@ -65,6 +76,7 @@ pub enum RowOrdering { Key, } +// TODO(yingwen): Maybe pack value_type with sequence (reserve 8bits in u64 for value type) like RocksDB. pub struct Batch { pub keys: Vec<VectorRef>, pub sequences: UInt64Vector, @@ -73,24 +85,18 @@ pub struct Batch { } /// Iterator of memtable. -pub trait BatchIterator: Send { +pub trait BatchIterator: Iterator<Item = Result<Batch>> + Send + Sync { /// Returns the schema of this iterator. 
fn schema(&self) -> &MemtableSchema; /// Returns the ordering of the output rows from this iterator. fn ordering(&self) -> RowOrdering; - - /// Fetch next batch from the memtable. - /// - /// # Panics - /// Panics if the iterator has already been exhausted. - fn next(&mut self) -> Result<Option<Batch>>; } pub type BatchIteratorPtr = Box<dyn BatchIterator>; pub trait MemtableBuilder: Send + Sync { - fn build(&self, schema: MemtableSchema) -> MemtableRef; + fn build(&self, id: MemtableId, schema: MemtableSchema) -> MemtableRef; } pub type MemtableBuilderRef = Arc<dyn MemtableBuilder>; @@ -100,7 +106,8 @@ pub type MemtableBuilderRef = Arc<dyn MemtableBuilder>; pub struct KeyValues { pub sequence: SequenceNumber, pub value_type: ValueType, - /// Start index of these key-value paris in batch. + /// Start index of these key-value paris in batch. Each row in the same batch has + /// a unique index to identify it. pub start_index_in_batch: usize, pub keys: Vec<VectorRef>, pub values: Vec<VectorRef>, @@ -132,42 +139,7 @@ impl KeyValues { pub struct DefaultMemtableBuilder {} impl MemtableBuilder for DefaultMemtableBuilder { - fn build(&self, schema: MemtableSchema) -> MemtableRef { - Arc::new(BTreeMemtable::new(schema)) - } -} - -#[derive(Debug, Snafu)] -#[snafu(display("Fail to switch memtable"))] -pub struct SwitchError; - -pub struct MemtableSet { - mem: MemtableRef, - // TODO(yingwen): Support multiple immutable memtables. - _immem: Option<MemtableRef>, -} - -impl MemtableSet { - pub fn new(mem: MemtableRef) -> MemtableSet { - MemtableSet { mem, _immem: None } - } - - pub fn mutable_memtable(&self) -> &MemtableRef { - &self.mem - } - - /// Switch mutable memtable to immutable memtable, returns the old mutable memtable if success. - pub fn _switch_memtable( - &mut self, - mem: &MemtableRef, - ) -> std::result::Result<MemtableRef, SwitchError> { - match &self._immem { - Some(_) => SwitchSnafu {}.fail(), - None => { - let old_mem = mem::replace(&mut self.mem, mem.clone()); - self._immem = Some(old_mem.clone()); - Ok(old_mem) - } - } + fn build(&self, id: MemtableId, schema: MemtableSchema) -> MemtableRef { + Arc::new(BTreeMemtable::new(id, schema)) } } diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs index e81ba4856311..4523a4730899 100644 --- a/src/storage/src/memtable/btree.rs +++ b/src/storage/src/memtable/btree.rs @@ -8,13 +8,15 @@ use std::sync::{ use datatypes::prelude::*; use datatypes::value::Value; -use datatypes::vectors::{UInt64VectorBuilder, UInt8VectorBuilder, VectorBuilder}; +use datatypes::vectors::{ + UInt64Vector, UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder, VectorBuilder, +}; use store_api::storage::{SequenceNumber, ValueType}; use crate::error::Result; use crate::memtable::{ - Batch, BatchIterator, BatchIteratorPtr, IterContext, KeyValues, Memtable, MemtableSchema, - RowOrdering, + Batch, BatchIterator, BatchIteratorPtr, IterContext, KeyValues, Memtable, MemtableId, + MemtableSchema, RowOrdering, }; type RwLockMap = RwLock<BTreeMap<InnerKey, RowValue>>; @@ -22,15 +24,18 @@ type RwLockMap = RwLock<BTreeMap<InnerKey, RowValue>>; /// A simple memtable implementation based on std's [`BTreeMap`]. /// /// Mainly for test purpose, don't use in production. 
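// Editor's sketch (illustrative, not part of this diff) of the reworked memtable API, where
// `schema` is a `MemtableSchema` and `kvs` a populated `KeyValues`:
//
//     let memtable = DefaultMemtableBuilder {}.build(1, schema);
//     memtable.write(&kvs)?;
//     for batch in memtable.iter(IterContext::default())? {
//         let batch = batch?; // the iterator now yields `Result<Batch>` items
//     }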
+#[derive(Debug)] pub struct BTreeMemtable { + id: MemtableId, schema: MemtableSchema, map: Arc<RwLockMap>, estimated_bytes: AtomicUsize, } impl BTreeMemtable { - pub fn new(schema: MemtableSchema) -> BTreeMemtable { + pub fn new(id: MemtableId, schema: MemtableSchema) -> BTreeMemtable { BTreeMemtable { + id, schema, map: Arc::new(RwLock::new(BTreeMap::new())), estimated_bytes: AtomicUsize::new(0), @@ -39,6 +44,10 @@ impl BTreeMemtable { } impl Memtable for BTreeMemtable { + fn id(&self) -> MemtableId { + self.id + } + fn schema(&self) -> &MemtableSchema { &self.schema } @@ -84,9 +93,13 @@ impl BatchIterator for BTreeIterator { fn ordering(&self) -> RowOrdering { RowOrdering::Key } +} - fn next(&mut self) -> Result<Option<Batch>> { - Ok(self.next_batch()) +impl Iterator for BTreeIterator { + type Item = Result<Batch>; + + fn next(&mut self) -> Option<Result<Batch>> { + self.next_batch().map(Ok) } } @@ -107,18 +120,13 @@ impl BTreeIterator { } else { map.range(..) }; - let iter = MapIterWrapper::new(iter, self.ctx.visible_sequence); - - let mut keys = Vec::with_capacity(self.ctx.batch_size); - let mut sequences = UInt64VectorBuilder::with_capacity(self.ctx.batch_size); - let mut value_types = UInt8VectorBuilder::with_capacity(self.ctx.batch_size); - let mut values = Vec::with_capacity(self.ctx.batch_size); - for (inner_key, row_value) in iter.take(self.ctx.batch_size) { - keys.push(inner_key); - sequences.push(Some(inner_key.sequence)); - value_types.push(Some(inner_key.value_type.as_u8())); - values.push(row_value); - } + + let (keys, sequences, value_types, values) = if self.ctx.for_flush { + collect_iter(iter, self.ctx.batch_size) + } else { + let iter = MapIterWrapper::new(iter, self.ctx.visible_sequence); + collect_iter(iter, self.ctx.batch_size) + }; if keys.is_empty() { return None; @@ -140,14 +148,37 @@ impl BTreeIterator { Some(Batch { keys: rows_to_vectors(key_data_types, keys.as_slice()), - sequences: sequences.finish(), - value_types: value_types.finish(), + sequences, + value_types, values: rows_to_vectors(value_data_types, values.as_slice()), }) } } -/// `MapIterWrapper` removes same user key with elder sequence. +fn collect_iter<'a, I: Iterator<Item = (&'a InnerKey, &'a RowValue)>>( + iter: I, + batch_size: usize, +) -> ( + Vec<&'a InnerKey>, + UInt64Vector, + UInt8Vector, + Vec<&'a RowValue>, +) { + let mut keys = Vec::with_capacity(batch_size); + let mut sequences = UInt64VectorBuilder::with_capacity(batch_size); + let mut value_types = UInt8VectorBuilder::with_capacity(batch_size); + let mut values = Vec::with_capacity(batch_size); + for (inner_key, row_value) in iter.take(batch_size) { + keys.push(inner_key); + sequences.push(Some(inner_key.sequence)); + value_types.push(Some(inner_key.value_type.as_u8())); + values.push(row_value); + } + + (keys, sequences.finish(), value_types.finish(), values) +} + +/// `MapIterWrapper` removes same user key with invisible sequence. 
struct MapIterWrapper<'a, InnerKey, RowValue> { iter: btree_map::Range<'a, InnerKey, RowValue>, prev_key: Option<InnerKey>, diff --git a/src/storage/src/memtable/inserter.rs b/src/storage/src/memtable/inserter.rs index 851d758a52a9..a54680615d80 100644 --- a/src/storage/src/memtable/inserter.rs +++ b/src/storage/src/memtable/inserter.rs @@ -1,51 +1,80 @@ +use std::collections::HashMap; use std::sync::Arc; +use std::time::Duration; -use datatypes::vectors::{NullVector, VectorRef}; -use snafu::ensure; +use common_time::{RangeMillis, TimestampMillis}; +use datatypes::prelude::ScalarVector; +use datatypes::schema::SchemaRef; +use datatypes::vectors::{Int64Vector, NullVector, VectorRef}; +use snafu::{ensure, OptionExt}; use store_api::storage::{ColumnDescriptor, SequenceNumber, ValueType}; use crate::error::{self, Result}; -use crate::memtable::{KeyValues, Memtable}; +use crate::memtable::{KeyValues, Memtable, MemtableSet}; use crate::write_batch::{Mutation, PutData, WriteBatch}; +type RangeIndexMap = HashMap<TimestampMillis, usize>; + /// Wraps logic of inserting key/values in [WriteBatch] to [Memtable]. pub struct Inserter { /// Sequence of the batch to be inserted. sequence: SequenceNumber, + /// Time ranges of all input data. + time_ranges: Vec<RangeMillis>, + /// Map time range's start time to its index in time ranges. + time_range_indexes: RangeIndexMap, + /// Bucket duration of memtables. + bucket_duration: Duration, + /// Used to calculate the start index in batch for `KeyValues`. index_in_batch: usize, } impl Inserter { - pub fn new(sequence: SequenceNumber) -> Inserter { + pub fn new( + sequence: SequenceNumber, + time_ranges: Vec<RangeMillis>, + bucket_duration: Duration, + ) -> Inserter { + let time_range_indexes = new_range_index_map(&time_ranges); + Inserter { sequence, + time_ranges, + time_range_indexes, + bucket_duration, index_in_batch: 0, } } // TODO(yingwen): Can we take the WriteBatch? - /// Insert write batch into memtable. + /// Insert write batch into memtables if both `batch` and `memtables` are not empty. /// - /// Won't do schema validation. - pub fn insert_memtable(&mut self, batch: &WriteBatch, memtable: &dyn Memtable) -> Result<()> { - if batch.is_empty() { + /// Won't do schema validation, caller (mostly the [`RegionWriter`]) should ensure the + /// schemas of `memtables` are consistent with `batch`'s, and the time ranges of `memtables` + /// are consistent with `self`'s time ranges. + /// + /// # Panics + /// Panics if there is time range in `self.time_ranges` but not in `memtables`. + pub fn insert_memtables(&mut self, batch: &WriteBatch, memtables: &MemtableSet) -> Result<()> { + if batch.is_empty() || memtables.is_empty() { return Ok(()); } - let schema = memtable.schema(); + // Enough to hold all key or value columns. + let total_column_num = batch.schema().num_columns(); // Reusable KeyValues buffer. 
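        // Editor's note: this buffer is reused for every mutation below; `put_memtables` either
        // writes the whole `PutData` into the single memtable (fast path) or slices it by time
        // range and routes each slice to the matching memtable.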
let mut kvs = KeyValues { sequence: self.sequence, value_type: ValueType::Put, start_index_in_batch: self.index_in_batch, - keys: Vec::with_capacity(schema.num_row_key_columns()), - values: Vec::with_capacity(schema.num_value_columns()), + keys: Vec::with_capacity(total_column_num), + values: Vec::with_capacity(total_column_num), }; for mutation in batch { match mutation { Mutation::Put(put_data) => { - self.put_impl(put_data, memtable, &mut kvs)?; + self.put_memtables(batch.schema(), put_data, memtables, &mut kvs)?; } } } @@ -53,7 +82,24 @@ impl Inserter { Ok(()) } - fn put_impl( + fn put_memtables( + &mut self, + schema: &SchemaRef, + put_data: &PutData, + memtables: &MemtableSet, + kvs: &mut KeyValues, + ) -> Result<()> { + if memtables.len() == 1 { + // Fast path, only one memtable to put. + let (_range, memtable) = memtables.iter().next().unwrap(); + return self.put_one_memtable(put_data, &**memtable, kvs); + } + + // Split data by time range and put them into memtables. + self.put_multiple_memtables(schema, put_data, memtables, kvs) + } + + fn put_one_memtable( &mut self, put_data: &PutData, memtable: &dyn Memtable, @@ -78,6 +124,52 @@ impl Inserter { Ok(()) } + + /// Put data to multiple memtables. + fn put_multiple_memtables( + &mut self, + schema: &SchemaRef, + put_data: &PutData, + memtables: &MemtableSet, + kvs: &mut KeyValues, + ) -> Result<()> { + let timestamp_schema = schema + .timestamp_column() + .context(error::BatchMissingTimestampSnafu)?; + + let timestamps = put_data.column_by_name(&timestamp_schema.name).context( + error::BatchMissingColumnSnafu { + column: &timestamp_schema.name, + }, + )?; + let timestamps = timestamps + .as_any() + .downcast_ref() + .context(error::BatchMissingTimestampSnafu)?; + let slice_indexes = + compute_slice_indexes(timestamps, self.bucket_duration, &self.time_range_indexes); + + for slice_index in slice_indexes { + let sliced_data = put_data.slice(slice_index.start, slice_index.end); + let range = &self.time_ranges[slice_index.range_index]; + // The caller should ensure memtable for given time range is exists. + let memtable = memtables + .get_by_range(range) + .expect("Memtable not found for range"); + + self.put_one_memtable(&sliced_data, &**memtable, kvs)?; + } + + Ok(()) + } +} + +fn new_range_index_map(time_ranges: &[RangeMillis]) -> RangeIndexMap { + time_ranges + .iter() + .enumerate() + .map(|(i, range)| (*range.start(), i)) + .collect() } fn clone_put_data_column_to( @@ -100,3 +192,519 @@ fn clone_put_data_column_to( Ok(()) } + +/// Holds `start` and `end` indexes to get a slice `[start, end)` from the vector whose +/// timestamps belong to same time range at `range_index`. +#[derive(Debug, PartialEq)] +struct SliceIndex { + start: usize, + end: usize, + /// Index in time ranges. + range_index: usize, +} + +/// Computes the indexes used to split timestamps into time ranges aligned by `duration`, stores +/// the indexes in [`SliceIndex`]. +/// +/// # Panics +/// Panics if the duration is too large to be represented by i64, or `timestamps` are not all +/// included by `time_range_indexes`. +fn compute_slice_indexes( + timestamps: &Int64Vector, + duration: Duration, + time_range_indexes: &RangeIndexMap, +) -> Vec<SliceIndex> { + let duration_ms = duration + .as_millis() + .try_into() + .unwrap_or_else(|e| panic!("Duration {:?} too large, {}", duration, e)); + let mut slice_indexes = Vec::with_capacity(time_range_indexes.len()); + // Current start and end of a valid `SliceIndex`. 
+ let (mut start, mut end) = (0, 0); + // Time range index of the valid but unpushed `SliceIndex`. + let mut last_range_index = None; + + // Iterate all timestamps, split timestamps by its time range. + for (i, ts) in timestamps.iter_data().enumerate() { + // Find index for time range of the timestamp. + let current_range_index = ts + .and_then(|v| TimestampMillis::new(v).align_by_bucket(duration_ms)) + .and_then(|aligned| time_range_indexes.get(&aligned).copied()); + + match current_range_index { + Some(current_range_index) => { + end = i; + + match last_range_index { + Some(last_index) => { + if last_index != current_range_index { + // Found a new range, we need to push a SliceIndex for last range. + slice_indexes.push(SliceIndex { + start, + end, + range_index: last_index, + }); + // Update last range index. + last_range_index = Some(current_range_index); + // Advance start. + start = i; + } + } + // No previous range index. + None => last_range_index = Some(current_range_index), + } + } + None => { + // Row without timestamp or out of time range will be skipped. This usually should not happen. + if let Some(last_index) = last_range_index { + // Need to store SliceIndex for last range. + slice_indexes.push(SliceIndex { + start, + end: i, + range_index: last_index, + }); + // Clear last range index. + last_range_index = None; + } + + // Advances start and end, skips current row. + start = i + 1; + end = start; + } + } + } + + // Process last slice index. + if let Some(last_index) = last_range_index { + slice_indexes.push(SliceIndex { + start, + // We need to use `end + 1` to include the last element. + end: end + 1, + range_index: last_index, + }); + } + + slice_indexes +} + +#[cfg(test)] +mod tests { + use datatypes::{type_id::LogicalTypeId, value::Value}; + use store_api::storage::{PutOperation, WriteRequest}; + + use super::*; + use crate::memtable::{ + DefaultMemtableBuilder, IterContext, MemtableBuilder, MemtableId, MemtableSchema, + }; + use crate::metadata::RegionMetadata; + use crate::test_util::descriptor_util::RegionDescBuilder; + use crate::test_util::write_batch_util; + + fn new_time_ranges(starts: &[i64], duration: i64) -> Vec<RangeMillis> { + let mut ranges = Vec::with_capacity(starts.len()); + for start in starts { + assert_eq!(*start, start / duration * duration); + + ranges.push(RangeMillis::new(*start, start + duration).unwrap()); + } + + ranges + } + + fn check_compute_slice_indexes( + timestamps: &[Option<i64>], + range_starts: &[i64], + duration: i64, + expect: &[SliceIndex], + ) { + assert!(duration > 0); + + let timestamps = Int64Vector::from_iter(timestamps.iter()); + let time_ranges = new_time_ranges(range_starts, duration); + let time_range_indexes = new_range_index_map(&time_ranges); + + let slice_indexes = compute_slice_indexes( + &timestamps, + Duration::from_millis(duration as u64), + &time_range_indexes, + ); + + assert_eq!(expect, slice_indexes); + } + + #[test] + fn test_compute_slice_indexes_valid() { + // Test empty input. + check_compute_slice_indexes(&[], &[], 100, &[]); + + // One valid input. + check_compute_slice_indexes( + &[Some(99)], + &[0], + 100, + &[SliceIndex { + start: 0, + end: 1, + range_index: 0, + }], + ); + + // 2 ranges. + check_compute_slice_indexes( + &[Some(99), Some(234)], + &[0, 200], + 100, + &[ + SliceIndex { + start: 0, + end: 1, + range_index: 0, + }, + SliceIndex { + start: 1, + end: 2, + range_index: 1, + }, + ], + ); + + // Multiple elements in first range. 
+ check_compute_slice_indexes( + &[Some(99), Some(13), Some(18), Some(234)], + &[0, 200], + 100, + &[ + SliceIndex { + start: 0, + end: 3, + range_index: 0, + }, + SliceIndex { + start: 3, + end: 4, + range_index: 1, + }, + ], + ); + + // Multiple elements in last range. + check_compute_slice_indexes( + &[Some(99), Some(234), Some(271)], + &[0, 200], + 100, + &[ + SliceIndex { + start: 0, + end: 1, + range_index: 0, + }, + SliceIndex { + start: 1, + end: 3, + range_index: 1, + }, + ], + ); + + // Mulitple ranges. + check_compute_slice_indexes( + &[Some(99), Some(13), Some(234), Some(456)], + &[0, 200, 400], + 100, + &[ + SliceIndex { + start: 0, + end: 2, + range_index: 0, + }, + SliceIndex { + start: 2, + end: 3, + range_index: 1, + }, + SliceIndex { + start: 3, + end: 4, + range_index: 2, + }, + ], + ); + + // Different slices with same range. + check_compute_slice_indexes( + &[Some(99), Some(234), Some(15)], + &[0, 200], + 100, + &[ + SliceIndex { + start: 0, + end: 1, + range_index: 0, + }, + SliceIndex { + start: 1, + end: 2, + range_index: 1, + }, + SliceIndex { + start: 2, + end: 3, + range_index: 0, + }, + ], + ); + } + + #[test] + fn test_compute_slice_indexes_null_timestamp() { + check_compute_slice_indexes(&[None], &[0], 100, &[]); + + check_compute_slice_indexes( + &[None, None, Some(53)], + &[0], + 100, + &[SliceIndex { + start: 2, + end: 3, + range_index: 0, + }], + ); + + check_compute_slice_indexes( + &[Some(53), None, None], + &[0], + 100, + &[SliceIndex { + start: 0, + end: 1, + range_index: 0, + }], + ); + + check_compute_slice_indexes( + &[None, Some(53), None, Some(240), Some(13), None], + &[0, 200], + 100, + &[ + SliceIndex { + start: 1, + end: 2, + range_index: 0, + }, + SliceIndex { + start: 3, + end: 4, + range_index: 1, + }, + SliceIndex { + start: 4, + end: 5, + range_index: 0, + }, + ], + ); + } + + #[test] + fn test_compute_slice_indexes_no_range() { + check_compute_slice_indexes( + &[Some(99), Some(234), Some(15)], + &[0], + 100, + &[ + SliceIndex { + start: 0, + end: 1, + range_index: 0, + }, + SliceIndex { + start: 2, + end: 3, + range_index: 0, + }, + ], + ); + + check_compute_slice_indexes( + &[Some(99), Some(15), Some(234)], + &[0], + 100, + &[SliceIndex { + start: 0, + end: 2, + range_index: 0, + }], + ); + + check_compute_slice_indexes( + &[Some(i64::MIN), Some(99), Some(15)], + &[0], + 100, + &[SliceIndex { + start: 1, + end: 3, + range_index: 0, + }], + ); + } + + fn new_test_write_batch() -> WriteBatch { + write_batch_util::new_write_batch( + &[ + ("ts", LogicalTypeId::Int64, false), + ("value", LogicalTypeId::Int64, true), + ], + Some(0), + ) + } + + fn new_memtable_schema() -> MemtableSchema { + let desc = RegionDescBuilder::new("test") + .timestamp(("ts", LogicalTypeId::Int64, false)) + .push_value_column(("value", LogicalTypeId::Int64, true)) + .enable_version_column(false) + .build(); + let metadata: RegionMetadata = desc.try_into().unwrap(); + + MemtableSchema::new(metadata.columns_row_key) + } + + fn put_batch(batch: &mut WriteBatch, data: &[(i64, Option<i64>)]) { + let mut put_data = PutData::with_num_columns(2); + let ts = Int64Vector::from_values(data.iter().map(|v| v.0)); + put_data.add_key_column("ts", Arc::new(ts)).unwrap(); + let value = Int64Vector::from_iter(data.iter().map(|v| v.1)); + put_data.add_value_column("value", Arc::new(value)).unwrap(); + + batch.put(put_data).unwrap(); + } + + fn new_memtable_set(time_ranges: &[RangeMillis], schema: &MemtableSchema) -> MemtableSet { + let mut set = MemtableSet::new(); + for (id, range) in 
time_ranges.iter().enumerate() { + let mem = DefaultMemtableBuilder {}.build(id as MemtableId, schema.clone()); + set.insert(*range, mem) + } + + set + } + + fn check_memtable_content( + mem: &dyn Memtable, + sequence: SequenceNumber, + data: &[(i64, Option<i64>)], + ) { + let iter = mem.iter(IterContext::default()).unwrap(); + + let mut index = 0; + for batch in iter { + let batch = batch.unwrap(); + let row_num = batch.keys[0].len(); + for i in 0..row_num { + let ts = batch.keys[0].get(i); + let v = batch.values[0].get(i); + assert_eq!(Value::from(data[index].0), ts); + assert_eq!(Value::from(data[index].1), v); + assert_eq!(sequence, batch.sequences.get_data(i).unwrap()); + + index += 1; + } + } + + assert_eq!(data.len(), index); + } + + #[test] + fn test_inserter_put_one_memtable() { + let sequence = 11111; + let bucket_duration = 100; + let time_ranges = new_time_ranges(&[0], bucket_duration); + let memtable_schema = new_memtable_schema(); + let memtables = new_memtable_set(&time_ranges, &memtable_schema); + let mut inserter = Inserter::new( + sequence, + time_ranges, + Duration::from_millis(bucket_duration as u64), + ); + + let mut batch = new_test_write_batch(); + put_batch(&mut batch, &[(1, Some(1)), (2, None)]); + // Also test multiple put data in one batch. + put_batch( + &mut batch, + &[ + (3, None), + // Duplicate entries in same put data. + (2, None), + (2, Some(2)), + (4, Some(4)), + ], + ); + + inserter.insert_memtables(&batch, &memtables).unwrap(); + let mem = memtables + .get_by_range(&RangeMillis::new(0, 100).unwrap()) + .unwrap(); + check_memtable_content( + &**mem, + sequence, + &[(1, Some(1)), (2, Some(2)), (3, None), (4, Some(4))], + ); + } + + #[test] + fn test_inserter_put_multiple() { + let sequence = 11111; + let bucket_duration = 100; + let time_ranges = new_time_ranges(&[0, 100, 200], bucket_duration); + let memtable_schema = new_memtable_schema(); + let memtables = new_memtable_set(&time_ranges, &memtable_schema); + let mut inserter = Inserter::new( + sequence, + time_ranges, + Duration::from_millis(bucket_duration as u64), + ); + + let mut batch = new_test_write_batch(); + put_batch( + &mut batch, + &[ + (1, Some(1)), + (2, None), + (201, Some(201)), + (102, None), + (101, Some(101)), + ], + ); + put_batch( + &mut batch, + &[ + (180, Some(1)), + (3, Some(3)), + (1, None), + (211, Some(211)), + (180, Some(180)), + ], + ); + + inserter.insert_memtables(&batch, &memtables).unwrap(); + let mem = memtables + .get_by_range(&RangeMillis::new(0, 100).unwrap()) + .unwrap(); + check_memtable_content(&**mem, sequence, &[(1, None), (2, None), (3, Some(3))]); + + let mem = memtables + .get_by_range(&RangeMillis::new(100, 200).unwrap()) + .unwrap(); + check_memtable_content( + &**mem, + sequence, + &[(101, Some(101)), (102, None), (180, Some(180))], + ); + + let mem = memtables + .get_by_range(&RangeMillis::new(200, 300).unwrap()) + .unwrap(); + check_memtable_content(&**mem, sequence, &[(201, Some(201)), (211, Some(211))]); + } +} diff --git a/src/storage/src/memtable/tests.rs b/src/storage/src/memtable/tests.rs index 564c68732afe..6f31213193f5 100644 --- a/src/storage/src/memtable/tests.rs +++ b/src/storage/src/memtable/tests.rs @@ -6,12 +6,16 @@ use super::*; use crate::metadata::RegionMetadata; use crate::test_util::descriptor_util::RegionDescBuilder; +// For simplicity, all memtables in test share same memtable id. 
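+// Memtable ids are normally allocated per region (e.g. via `WriterInner::alloc_memtable_id`);
+// none of the tests below rely on ids being unique.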
+const MEMTABLE_ID: MemtableId = 1; + // Schema for testing memtable: // - key: Int64(timestamp), UInt64(version), // - value: UInt64 -fn schema_for_test() -> MemtableSchema { +pub fn schema_for_test() -> MemtableSchema { // Just build a region desc and use its columns_row_key metadata. let desc = RegionDescBuilder::new("test") + .enable_version_column(true) .push_value_column(("v1", LogicalTypeId::UInt64, true)) .build(); let metadata: RegionMetadata = desc.try_into().unwrap(); @@ -70,7 +74,7 @@ fn kvs_for_test( kvs_for_test_with_index(sequence, value_type, 0, keys, values) } -fn write_kvs( +pub fn write_kvs( memtable: &dyn Memtable, sequence: SequenceNumber, value_type: ValueType, @@ -100,7 +104,8 @@ fn check_iter_content( values: &[Option<u64>], ) { let mut index = 0; - while let Some(batch) = iter.next().unwrap() { + for batch in iter { + let batch = batch.unwrap(); check_batch_valid(&batch); let row_num = batch.keys[0].len(); @@ -147,7 +152,7 @@ impl MemtableTester { fn new_memtables(&self) -> Vec<MemtableRef> { self.builders .iter() - .map(|b| b.build(self.schema.clone())) + .map(|b| b.build(MEMTABLE_ID, self.schema.clone())) .collect() } @@ -174,7 +179,9 @@ struct TestContext { fn write_iter_memtable_case(ctx: &TestContext) { // Test iterating an empty memtable. let mut iter = ctx.memtable.iter(IterContext::default()).unwrap(); - assert!(iter.next().unwrap().is_none()); + assert!(iter.next().is_none()); + // Poll the empty iterator again. + assert!(iter.next().is_none()); assert_eq!(0, ctx.memtable.bytes_allocated()); // Init test data. @@ -262,7 +269,8 @@ fn test_write_iter_memtable() { fn check_iter_batch_size(iter: &mut dyn BatchIterator, total: usize, batch_size: usize) { let mut remains = total; - while let Some(batch) = iter.next().unwrap() { + for batch in iter { + let batch = batch.unwrap(); check_batch_valid(&batch); let row_num = batch.keys[0].len(); @@ -419,6 +427,7 @@ fn test_sequence_visibility() { let iter_ctx = IterContext { batch_size: 1, visible_sequence: 9, + for_flush: false, }; let mut iter = ctx.memtable.iter(iter_ctx).unwrap(); @@ -435,6 +444,7 @@ fn test_sequence_visibility() { let iter_ctx = IterContext { batch_size: 1, visible_sequence: 10, + for_flush: false, }; let mut iter = ctx.memtable.iter(iter_ctx).unwrap(); @@ -451,6 +461,7 @@ fn test_sequence_visibility() { let iter_ctx = IterContext { batch_size: 1, visible_sequence: 11, + for_flush: false, }; let mut iter = ctx.memtable.iter(iter_ctx).unwrap(); @@ -465,4 +476,26 @@ fn test_sequence_visibility() { }); } -// TODO(yingwen): Test key overwrite in same batch. 
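+// Once the iterator is exhausted it should keep returning `None` on every
+// subsequent call; the test below covers that.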
+#[test] +fn test_iter_after_none() { + let tester = MemtableTester::default(); + tester.run_testcase(|ctx| { + write_kvs( + &*ctx.memtable, + 10, // sequence + ValueType::Put, + &[(1000, 0), (1001, 1), (1002, 2)], // keys + &[Some(0), Some(1), Some(2)], // values + ); + + let iter_ctx = IterContext { + batch_size: 4, + ..Default::default() + }; + + let mut iter = ctx.memtable.iter(iter_ctx).unwrap(); + assert!(iter.next().is_some()); + assert!(iter.next().is_none()); + assert!(iter.next().is_none()); + }); +} diff --git a/src/storage/src/memtable/version.rs b/src/storage/src/memtable/version.rs new file mode 100644 index 000000000000..9065e664dd7a --- /dev/null +++ b/src/storage/src/memtable/version.rs @@ -0,0 +1,415 @@ +use std::cmp::Ordering; +use std::collections::BTreeMap; +use std::sync::Arc; + +use common_time::RangeMillis; + +use crate::flush::MemtableWithMeta; +use crate::memtable::{MemtableId, MemtableRef}; + +/// A version of all memtables. +/// +/// This structure is immutable now. +#[derive(Default, Debug, PartialEq, Eq)] +pub struct MemtableVersion { + mutable: MemtableSet, + /// Immutable memtables. + immutables: Vec<MemtableSetRef>, +} + +impl MemtableVersion { + pub fn new() -> MemtableVersion { + MemtableVersion::default() + } + + #[inline] + pub fn mutable_memtables(&self) -> &MemtableSet { + &self.mutable + } + + #[inline] + pub fn immutable_memtables(&self) -> &[MemtableSetRef] { + &self.immutables + } + + pub fn num_memtables(&self) -> usize { + self.mutable.len() + self.immutables.iter().map(|set| set.len()).sum::<usize>() + } + + /// Clone current memtable version and freeze its mutable memtables, which moves + /// all mutable memtables to immutable memtable list. + pub fn freeze_mutable(&self) -> MemtableVersion { + let mut immutables = self.immutables.clone(); + immutables.push(Arc::new(self.mutable.clone())); + + MemtableVersion { + mutable: MemtableSet::new(), + immutables, + } + } + + pub fn mutable_bytes_allocated(&self) -> usize { + self.mutable.bytes_allocated() + } + + pub fn total_bytes_allocated(&self) -> usize { + self.immutables + .iter() + .map(|m| m.bytes_allocated()) + .sum::<usize>() + + self.mutable.bytes_allocated() + } + + /// Creates a new `MemtableVersion` that contains memtables both in this and `other`. + /// + /// # Panics + /// Panics if there are memtables with same time ranges. + pub fn add_mutable(&self, other: MemtableSet) -> MemtableVersion { + let mutable = self.mutable.add(other); + + Self { + mutable, + immutables: self.immutables.clone(), + } + } + + /// Creates a new `MemtableVersion` that removes immutable memtables + /// less than or equal to max_memtable_id. + pub fn remove_immutables(&self, max_memtable_id: MemtableId) -> MemtableVersion { + let immutables = self + .immutables + .iter() + .filter(|immem| immem.max_memtable_id() > max_memtable_id) + .cloned() + .collect(); + + MemtableVersion { + mutable: self.mutable.clone(), + immutables, + } + } + + pub fn memtables_to_flush(&self) -> (Option<MemtableId>, Vec<MemtableWithMeta>) { + let max_memtable_id = self + .immutables + .iter() + .map(|immem| immem.max_memtable_id()) + .max(); + let memtables = self + .immutables + .iter() + .flat_map(|immem| immem.to_memtable_with_metas()) + .collect(); + + (max_memtable_id, memtables) + } +} + +// We use a new type to order time ranges by (end, start). 
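+// Because a `MemtableSet` never holds overlapping ranges and all ranges share the same
+// bucket duration, ordering by (end, start) also orders the ranges chronologically.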
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +struct RangeKey(RangeMillis); + +impl Ord for RangeKey { + fn cmp(&self, other: &RangeKey) -> Ordering { + self.0 + .end() + .cmp(other.0.end()) + .then_with(|| self.0.start().cmp(other.0.start())) + } +} + +impl PartialOrd for RangeKey { + fn partial_cmp(&self, other: &RangeKey) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +/// Collection of mutable memtables. +/// +/// Memtables are partitioned by their time range. Caller should ensure +/// there are no overlapped ranges and all ranges are aligned by same +/// bucket duration. +#[derive(Default, Clone, Debug)] +pub struct MemtableSet { + memtables: BTreeMap<RangeKey, MemtableRef>, + max_memtable_id: MemtableId, +} + +pub type MemtableSetRef = Arc<MemtableSet>; + +impl PartialEq for MemtableSet { + fn eq(&self, other: &MemtableSet) -> bool { + self.max_memtable_id == other.max_memtable_id + && self.memtables.len() == other.memtables.len() + && self + .memtables + .iter() + .zip(&other.memtables) + .all(|(a, b)| a.0 == b.0 && a.1.id() == b.1.id() && a.1.schema() == b.1.schema()) + } +} + +impl Eq for MemtableSet {} + +impl MemtableSet { + pub fn new() -> MemtableSet { + MemtableSet::default() + } + + /// Get memtable by time range. + /// + /// The range must exactly equal to the range of the memtable, otherwise `None` + /// is returned. + pub fn get_by_range(&self, range: &RangeMillis) -> Option<&MemtableRef> { + let range_key = RangeKey(*range); + self.memtables.get(&range_key) + } + + /// Insert a new memtable. + /// + /// # Panics + /// Panics if memtable with same range already exists. + pub fn insert(&mut self, range: RangeMillis, mem: MemtableRef) { + self.max_memtable_id = MemtableId::max(self.max_memtable_id, mem.id()); + let old = self.memtables.insert(RangeKey(range), mem); + assert!(old.is_none()); + } + + /// Returns number of memtables in the set. + #[inline] + pub fn len(&self) -> usize { + self.memtables.len() + } + + /// Returns true if there is no memtable in the set. + #[inline] + pub fn is_empty(&self) -> bool { + self.memtables.is_empty() + } + + pub fn bytes_allocated(&self) -> usize { + self.memtables.values().map(|m| m.bytes_allocated()).sum() + } + + pub fn max_memtable_id(&self) -> MemtableId { + self.max_memtable_id + } + + /// Creates a new `MemtableSet` that contains memtables both in `self` and + /// `other`, let `self` unchanged. + pub fn add(&self, mut other: MemtableSet) -> MemtableSet { + // We use `other.memtables` to extend `self.memtables` since memtables + // in other should be empty in usual, so overwriting it is okay. 
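+        // If both sets happen to contain the same range, the entry from `self` wins,
+        // since `extend` overwrites existing keys in `other.memtables`.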
+ other + .memtables + .extend(self.memtables.iter().map(|(k, v)| (*k, v.clone()))); + + MemtableSet { + memtables: other.memtables, + max_memtable_id: MemtableId::max(self.max_memtable_id, other.max_memtable_id), + } + } + + pub fn to_memtable_with_metas(&self) -> Vec<MemtableWithMeta> { + self.memtables + .iter() + .map(|(range_key, memtable)| MemtableWithMeta { + memtable: memtable.clone(), + bucket: range_key.0, + }) + .collect() + } + + pub fn iter(&self) -> impl Iterator<Item = (&RangeMillis, &MemtableRef)> { + self.memtables.iter().map(|(k, v)| (&k.0, v)) + } +} + +#[cfg(test)] +mod tests { + use store_api::storage::ValueType; + + use super::*; + use crate::memtable::tests; + use crate::memtable::BTreeMemtable; + use crate::memtable::Memtable; + + #[test] + fn test_memtableset_misc() { + let mut set = MemtableSet::new(); + + assert!(set.is_empty()); + assert_eq!(0, set.max_memtable_id()); + assert_eq!(0, set.bytes_allocated()); + assert!(set + .get_by_range(&RangeMillis::new(0, 10).unwrap()) + .is_none()); + + set.insert( + RangeMillis::new(0, 10).unwrap(), + Arc::new(BTreeMemtable::new(0, tests::schema_for_test())), + ); + set.insert( + RangeMillis::new(10, 20).unwrap(), + Arc::new(BTreeMemtable::new(1, tests::schema_for_test())), + ); + let memtable = Arc::new(BTreeMemtable::new(2, tests::schema_for_test())); + // Write some test data + tests::write_kvs( + &*memtable, + 10, // sequence + ValueType::Put, + &[ + (1000, 1), + (1000, 2), + (2002, 1), + (2003, 1), + (2003, 5), + (1001, 1), + ], // keys + &[Some(1), Some(2), Some(7), Some(8), Some(9), Some(3)], // values + ); + + set.insert(RangeMillis::new(20, 30).unwrap(), memtable.clone()); + + for (i, (range, _)) in set.iter().enumerate() { + assert_eq!( + *range, + RangeMillis::new(i as i64 * 10, i as i64 * 10 + 10).unwrap() + ); + } + + assert!(!set.is_empty()); + assert_eq!(2, set.max_memtable_id()); + assert_eq!(memtable.bytes_allocated(), set.bytes_allocated()); + assert!(set + .get_by_range(&RangeMillis::new(0, 10).unwrap()) + .is_some()); + assert!(set + .get_by_range(&RangeMillis::new(10, 20).unwrap()) + .is_some()); + assert!(set + .get_by_range(&RangeMillis::new(20, 30).unwrap()) + .is_some()); + assert!(set + .get_by_range(&RangeMillis::new(0, 100).unwrap()) + .is_none()); + } + + fn create_test_memtableset(ids: &[MemtableId]) -> MemtableSet { + let mut set = MemtableSet::new(); + + for id in ids { + let i = *id as i64; + set.insert( + RangeMillis::new(i * 10, (i + 1) * 10).unwrap(), + Arc::new(BTreeMemtable::new(*id, tests::schema_for_test())), + ); + } + + set + } + + #[test] + fn test_add_memtableset() { + let s1 = create_test_memtableset(&[0, 1, 2]); + let s2 = create_test_memtableset(&[3, 4, 5, 6]); + + let mut s1_memtables = s1.to_memtable_with_metas(); + let s2_memtables = s2.to_memtable_with_metas(); + s1_memtables.extend(s2_memtables); + + let empty = create_test_memtableset(&[]); + assert_eq!(s1, s1.add(empty)); + + let s3 = s1.add(s2); + assert_ne!(s1, s3); + + assert_eq!(7, s3.memtables.len()); + let s3_memtables = s3.to_memtable_with_metas(); + assert_eq!(7, s3_memtables.len()); + + for i in 0..7 { + assert_eq!(s1_memtables[i].bucket, s3_memtables[i].bucket); + assert_eq!(s1_memtables[i].memtable.id(), s3_memtables[i].memtable.id()); + } + assert_eq!(6, s3.max_memtable_id()); + } + + #[test] + fn test_memtableversion() { + let s1 = create_test_memtableset(&[0, 1, 2]); + let s2 = create_test_memtableset(&[3, 4, 5, 6]); + let s3 = s1.add(s2.clone()); + + let v1 = MemtableVersion::new(); + 
assert!(v1.mutable_memtables().is_empty()); + assert_eq!(0, v1.num_memtables()); + + // Add one mutable + let v2 = v1.add_mutable(s1.clone()); + assert_ne!(v1, v2); + let mutables = v2.mutable_memtables(); + assert_eq!(s1, *mutables); + assert_eq!(3, v2.num_memtables()); + + // Add another mutable + let v3 = v2.add_mutable(s2); + assert_ne!(v1, v3); + assert_ne!(v2, v3); + let mutables = v3.mutable_memtables(); + assert_eq!(s3, *mutables); + assert!(v3.memtables_to_flush().1.is_empty()); + assert_eq!(7, v3.num_memtables()); + + // Try to freeze s1, s2 + let v4 = v3.freeze_mutable(); + assert_ne!(v1, v4); + assert_ne!(v2, v4); + assert_ne!(v3, v4); + assert!(v4.mutable_memtables().is_empty()); + assert_eq!(v4.immutables.len(), 1); + assert_eq!(v4.immutables[0], Arc::new(s3.clone())); + + let (max_id, tables) = v4.memtables_to_flush(); + assert_eq!(6, max_id.unwrap()); + assert_eq!(7, tables.len()); + assert_eq!(7, v4.num_memtables()); + + // Add another mutable + let s4 = create_test_memtableset(&[7, 8]); + let v5 = v4.add_mutable(s4.clone()); + let mutables = v5.mutable_memtables(); + assert_eq!(s4, *mutables); + assert_eq!(v4.immutables, v5.immutables); + + // Try to freeze s4 + let v6 = v5.freeze_mutable(); + assert_eq!(v6.immutables.len(), 2); + assert_eq!(v6.immutables[0], Arc::new(s3)); + assert_eq!(v6.immutables[1], Arc::new(s4.clone())); + + let (max_id, tables) = v6.memtables_to_flush(); + assert_eq!(8, max_id.unwrap()); + assert_eq!(9, tables.len()); + assert_eq!(9, v6.num_memtables()); + // verify tables + for (i, table) in tables.iter().enumerate() { + assert_eq!(i as u32, table.memtable.id()); + let i = i as i64; + assert_eq!( + table.bucket, + RangeMillis::new(i * 10, (i + 1) * 10).unwrap() + ); + } + + // Remove tables + let v7 = v6.remove_immutables(6); + assert_eq!(v7.immutables.len(), 1); + assert_eq!(v7.immutables[0], Arc::new(s4)); + + let v8 = v7.remove_immutables(8); + assert_eq!(v8.immutables.len(), 0); + assert_eq!(0, v8.num_memtables()); + } +} diff --git a/src/storage/src/metadata.rs b/src/storage/src/metadata.rs index 8bab1c513c25..d98ae387e1b3 100644 --- a/src/storage/src/metadata.rs +++ b/src/storage/src/metadata.rs @@ -3,10 +3,12 @@ use std::sync::Arc; use common_error::prelude::*; use datatypes::data_type::ConcreteDataType; +use serde::{Deserialize, Serialize}; use snafu::ensure; use store_api::storage::{ consts, ColumnDescriptor, ColumnDescriptorBuilder, ColumnFamilyDescriptor, ColumnFamilyId, - ColumnId, ColumnSchema, RegionDescriptor, RegionMeta, RowKeyDescriptor, Schema, SchemaRef, + ColumnId, ColumnSchema, RegionDescriptor, RegionId, RegionMeta, RowKeyDescriptor, Schema, + SchemaRef, }; /// Error for handling metadata. @@ -20,6 +22,12 @@ pub enum Error { #[snafu(display("Column family id already exists, id: {}", id))] CfIdExists { id: ColumnId, backtrace: Backtrace }, + + #[snafu(display("Failed to build schema, source: {}", source))] + InvalidSchema { + source: datatypes::error::Error, + backtrace: Backtrace, + }, } pub type Result<T> = std::result::Result<T, Error>; @@ -27,6 +35,7 @@ pub type Result<T> = std::result::Result<T, Error>; /// Implementation of [RegionMeta]. /// /// Holds a snapshot of region metadata. +#[derive(Clone, Debug)] pub struct RegionMetaImpl { metadata: RegionMetadataRef, } @@ -48,8 +57,9 @@ pub type VersionNumber = u32; // TODO(yingwen): Make some fields of metadata private. /// In memory metadata of region. 
-#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct RegionMetadata { + pub id: RegionId, /// Schema of the region. /// /// Holding a [SchemaRef] to allow converting into `SchemaRef`/`arrow::SchemaRef` @@ -66,13 +76,13 @@ pub struct RegionMetadata { pub type RegionMetadataRef = Arc<RegionMetadata>; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ColumnMetadata { pub cf_id: ColumnFamilyId, pub desc: ColumnDescriptor, } -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ColumnsMetadata { /// All columns, in `(key columns, timestamp, [version,] value columns)` order. /// @@ -82,7 +92,7 @@ pub struct ColumnsMetadata { pub name_to_col_index: HashMap<String, usize>, } -#[derive(Clone, Debug, Default, PartialEq)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] pub struct RowKeyMetadata { /// Exclusive end index of row key columns. row_key_end: usize, @@ -93,7 +103,7 @@ pub struct RowKeyMetadata { pub enable_version_column: bool, } -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ColumnsRowKeyMetadata { columns: ColumnsMetadata, row_key: RowKeyMetadata, @@ -121,7 +131,7 @@ impl ColumnsRowKeyMetadata { pub type ColumnsRowKeyMetadataRef = Arc<ColumnsRowKeyMetadata>; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct ColumnFamiliesMetadata { /// Map column family id to column family metadata. id_to_cfs: HashMap<ColumnFamilyId, ColumnFamilyMetadata>, @@ -133,7 +143,7 @@ impl ColumnFamiliesMetadata { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct ColumnFamilyMetadata { /// Column family name. pub name: String, @@ -151,18 +161,20 @@ impl TryFrom<RegionDescriptor> for RegionMetadata { // Doesn't set version explicitly here, because this is a new region meta // created from descriptor, using initial version is reasonable. let mut builder = RegionMetadataBuilder::new() + .id(desc.id) .row_key(desc.row_key)? 
.add_column_family(desc.default_cf)?; for cf in desc.extra_cfs { builder = builder.add_column_family(cf)?; } - Ok(builder.build()) + builder.build() } } #[derive(Default)] struct RegionMetadataBuilder { + id: RegionId, columns: Vec<ColumnMetadata>, column_schemas: Vec<ColumnSchema>, name_to_col_index: HashMap<String, usize>, @@ -178,6 +190,11 @@ impl RegionMetadataBuilder { RegionMetadataBuilder::default() } + fn id(mut self, id: RegionId) -> Self { + self.id = id; + self + } + fn row_key(mut self, key: RowKeyDescriptor) -> Result<Self> { for col in key.columns { self.push_row_key_column(col)?; @@ -234,8 +251,15 @@ impl RegionMetadataBuilder { Ok(self) } - fn build(self) -> RegionMetadata { - let schema = Arc::new(Schema::new(self.column_schemas)); + fn build(self) -> Result<RegionMetadata> { + let schema = if self.column_schemas.is_empty() { + Arc::new(Schema::new(self.column_schemas)) + } else { + Arc::new( + Schema::with_timestamp_index(self.column_schemas, self.row_key.timestamp_key_index) + .context(InvalidSchemaSnafu)?, + ) + }; let columns = ColumnsMetadata { columns: self.columns, name_to_col_index: self.name_to_col_index, @@ -245,14 +269,15 @@ impl RegionMetadataBuilder { row_key: self.row_key, }); - RegionMetadata { + Ok(RegionMetadata { + id: self.id, schema, columns_row_key, column_families: ColumnFamiliesMetadata { id_to_cfs: self.id_to_cfs, }, version: 0, - } + }) } // Helper methods: @@ -308,17 +333,20 @@ mod tests { #[test] fn test_descriptor_to_region_metadata() { let desc = RegionDescBuilder::new("region-0") - .timestamp(("ts", LogicalTypeId::UInt64, false)) + .timestamp(("ts", LogicalTypeId::Int64, false)) .enable_version_column(false) .push_key_column(("k1", LogicalTypeId::Int32, false)) .push_value_column(("v1", LogicalTypeId::Float32, true)) .build(); - let expect_schema = schema_util::new_schema_ref(&[ - ("k1", LogicalTypeId::Int32, false), - ("ts", LogicalTypeId::UInt64, false), - ("v1", LogicalTypeId::Float32, true), - ]); + let expect_schema = schema_util::new_schema_ref( + &[ + ("k1", LogicalTypeId::Int32, false), + ("ts", LogicalTypeId::Int64, false), + ("v1", LogicalTypeId::Float32, true), + ], + Some(1), + ); let metadata = RegionMetadata::try_from(desc).unwrap(); assert_eq!(expect_schema, metadata.schema); @@ -328,7 +356,7 @@ mod tests { #[test] fn test_build_empty_region_metadata() { - let metadata = RegionMetadataBuilder::default().build(); + let metadata = RegionMetadataBuilder::default().build().unwrap(); assert!(metadata.schema.column_schemas().is_empty()); assert!(metadata.columns_row_key.columns.columns.is_empty()); @@ -373,17 +401,21 @@ mod tests { .add_column_family(cf) .unwrap() .build() + .unwrap() } #[test] fn test_build_metedata_disable_version() { let metadata = new_metadata(false); - let expect_schema = schema_util::new_schema_ref(&[ - ("k1", LogicalTypeId::Int64, false), - ("ts", LogicalTypeId::Int64, false), - ("v1", LogicalTypeId::Int64, true), - ]); + let expect_schema = schema_util::new_schema_ref( + &[ + ("k1", LogicalTypeId::Int64, false), + ("ts", LogicalTypeId::Int64, false), + ("v1", LogicalTypeId::Int64, true), + ], + Some(1), + ); assert_eq!(expect_schema, metadata.schema); @@ -422,12 +454,15 @@ mod tests { fn test_build_metedata_enable_version() { let metadata = new_metadata(true); - let expect_schema = schema_util::new_schema_ref(&[ - ("k1", LogicalTypeId::Int64, false), - ("ts", LogicalTypeId::Int64, false), - (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), - ("v1", LogicalTypeId::Int64, true), - ]); + let 
expect_schema = schema_util::new_schema_ref( + &[ + ("k1", LogicalTypeId::Int64, false), + ("ts", LogicalTypeId::Int64, false), + (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), + ("v1", LogicalTypeId::Int64, true), + ], + Some(1), + ); assert_eq!(expect_schema, metadata.schema); diff --git a/src/storage/src/proto.rs b/src/storage/src/proto.rs new file mode 100644 index 000000000000..355623b4a824 --- /dev/null +++ b/src/storage/src/proto.rs @@ -0,0 +1,43 @@ +#![allow(clippy::all)] + +tonic::include_proto!("greptime.storage.wal.v1"); + +use crate::write_batch::{Mutation, WriteBatch}; + +pub fn gen_mutation_extras(write_batch: &WriteBatch) -> Vec<MutationExtra> { + let column_schemas = write_batch.schema().column_schemas(); + write_batch + .iter() + .map(|m| match m { + Mutation::Put(put) => { + if put.num_columns() == column_schemas.len() { + MutationExtra { + mutation_type: MutationType::Put.into(), + column_null_mask: Default::default(), + } + } else { + let mut column_null_mask = + bit_vec::BitVec::from_elem(column_schemas.len(), false); + for (i, cs) in column_schemas.iter().enumerate() { + if put.column_by_name(&cs.name).is_none() { + column_null_mask.set(i, true); + } + } + MutationExtra { + mutation_type: MutationType::Put.into(), + column_null_mask: column_null_mask.to_bytes(), + } + } + } + }) + .collect::<Vec<_>>() +} + +impl WalHeader { + pub fn with_last_manifest_version(last_manifest_version: u64) -> Self { + Self { + last_manifest_version, + ..Default::default() + } + } +} diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs index 84d86130b3f7..55c1ff5da29b 100644 --- a/src/storage/src/region.rs +++ b/src/storage/src/region.rs @@ -6,32 +6,44 @@ use std::sync::Arc; use async_trait::async_trait; use snafu::ensure; -use store_api::storage::{ReadContext, Region, RegionMeta, WriteContext, WriteResponse}; -use tokio::sync::Mutex; +use store_api::logstore::LogStore; +use store_api::storage::{ReadContext, Region, RegionId, RegionMeta, WriteContext, WriteResponse}; +use crate::background::JobPoolImpl; use crate::error::{self, Error, Result}; -use crate::memtable::{DefaultMemtableBuilder, MemtableBuilder, MemtableSchema, MemtableSet}; +use crate::flush::{FlushSchedulerImpl, FlushSchedulerRef, FlushStrategyRef, SizeBasedStrategy}; +use crate::manifest::region::RegionManifest; +use crate::memtable::{DefaultMemtableBuilder, MemtableVersion}; use crate::metadata::{RegionMetaImpl, RegionMetadata}; -use crate::region::writer::RegionWriter; +pub use crate::region::writer::{RegionWriter, RegionWriterRef, WriterContext}; use crate::snapshot::SnapshotImpl; +use crate::sst::AccessLayerRef; use crate::version::{VersionControl, VersionControlRef}; +use crate::wal::Wal; use crate::write_batch::WriteBatch; /// [Region] implementation. 
-#[derive(Clone)] -pub struct RegionImpl { - inner: Arc<RegionInner>, +pub struct RegionImpl<S: LogStore> { + inner: Arc<RegionInner<S>>, +} + +impl<S: LogStore> Clone for RegionImpl<S> { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } } #[async_trait] -impl Region for RegionImpl { +impl<S: LogStore> Region for RegionImpl<S> { type Error = Error; type Meta = RegionMetaImpl; type WriteRequest = WriteBatch; type Snapshot = SnapshotImpl; fn name(&self) -> &str { - &self.inner.name + &self.inner.shared.name } fn in_memory_metadata(&self) -> RegionMetaImpl { @@ -47,61 +59,109 @@ impl Region for RegionImpl { } } -impl RegionImpl { - pub fn new(name: String, metadata: RegionMetadata) -> RegionImpl { +impl<S: LogStore> RegionImpl<S> { + pub fn new( + id: RegionId, + name: String, + metadata: RegionMetadata, + wal: Wal<S>, + sst_layer: AccessLayerRef, + manifest: RegionManifest, + ) -> RegionImpl<S> { let memtable_builder = Arc::new(DefaultMemtableBuilder {}); - let memtable_schema = MemtableSchema::new(metadata.columns_row_key.clone()); - let mem = memtable_builder.build(memtable_schema); - let memtables = MemtableSet::new(mem); + let memtable_version = MemtableVersion::new(); + // TODO(yingwen): Pass flush scheduler to `RegionImpl::new`. + let job_pool = Arc::new(JobPoolImpl {}); + let flush_scheduler = Arc::new(FlushSchedulerImpl::new(job_pool)); - let version = VersionControl::new(metadata, memtables); + let version_control = VersionControl::new(metadata, memtable_version); let inner = Arc::new(RegionInner { - name, - version: Arc::new(version), - writer: Mutex::new(RegionWriter::new(memtable_builder)), + shared: Arc::new(SharedData { + id, + name, + version_control: Arc::new(version_control), + }), + writer: Arc::new(RegionWriter::new(memtable_builder)), + wal, + flush_strategy: Arc::new(SizeBasedStrategy::default()), + flush_scheduler, + sst_layer, + manifest, }); RegionImpl { inner } } +} - #[cfg(test)] +// Private methods for tests. +#[cfg(test)] +impl<S: LogStore> RegionImpl<S> { #[inline] fn committed_sequence(&self) -> store_api::storage::SequenceNumber { - self.inner.version.committed_sequence() + self.inner.version_control().committed_sequence() } } -struct RegionInner { - name: String, - version: VersionControlRef, - writer: Mutex<RegionWriter>, +/// Shared data of region. +pub struct SharedData { + pub id: RegionId, + pub name: String, + // TODO(yingwen): Maybe no need to use Arc for version control. + pub version_control: VersionControlRef, } -impl RegionInner { +pub type SharedDataRef = Arc<SharedData>; + +struct RegionInner<S: LogStore> { + shared: SharedDataRef, + writer: RegionWriterRef, + wal: Wal<S>, + flush_strategy: FlushStrategyRef, + flush_scheduler: FlushSchedulerRef, + sst_layer: AccessLayerRef, + manifest: RegionManifest, +} + +impl<S: LogStore> RegionInner<S> { + #[inline] + fn version_control(&self) -> &VersionControl { + &*self.shared.version_control + } + fn in_memory_metadata(&self) -> RegionMetaImpl { - let metadata = self.version.metadata(); + let metadata = self.version_control().metadata(); RegionMetaImpl::new(metadata) } + fn create_snapshot(&self) -> SnapshotImpl { + let version = self.version_control().current(); + let sequence = self.version_control().committed_sequence(); + + SnapshotImpl::new(version, sequence) + } + async fn write(&self, ctx: &WriteContext, request: WriteBatch) -> Result<WriteResponse> { let metadata = self.in_memory_metadata(); let schema = metadata.schema(); // Only compare column schemas. 
ensure!( schema.column_schemas() == request.schema().column_schemas(), - error::InvalidInputSchemaSnafu { region: &self.name } + error::InvalidInputSchemaSnafu { + region: &self.shared.name + } ); + let writer_ctx = WriterContext { + shared: &self.shared, + flush_strategy: &self.flush_strategy, + flush_scheduler: &self.flush_scheduler, + sst_layer: &self.sst_layer, + wal: &self.wal, + writer: &self.writer, + manifest: &self.manifest, + }; // Now altering schema is not allowed, so it is safe to validate schema outside of the lock. - let mut writer = self.writer.lock().await; - writer.write(ctx, &self.version, request).await - } - - fn create_snapshot(&self) -> SnapshotImpl { - let version = self.version.current(); - let sequence = self.version.committed_sequence(); - - SnapshotImpl::new(version, sequence) + self.writer.write(ctx, request, writer_ctx).await } } diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs index ce1f22c407a1..9f91ce1bfd99 100644 --- a/src/storage/src/region/tests.rs +++ b/src/storage/src/region/tests.rs @@ -3,28 +3,58 @@ mod read_write; use datatypes::type_id::LogicalTypeId; +use log_store::fs::noop::NoopLogStore; +use object_store::{backend::fs::Backend, ObjectStore}; +use store_api::manifest::Manifest; use store_api::storage::consts; +use tempdir::TempDir; use super::*; +use crate::manifest::region::RegionManifest; +use crate::sst::FsAccessLayer; use crate::test_util::{self, descriptor_util::RegionDescBuilder, schema_util}; -#[test] -fn test_new_region() { +#[tokio::test] +async fn test_new_region() { + let region_id = 0; let region_name = "region-0"; let desc = RegionDescBuilder::new(region_name) + .enable_version_column(true) .push_key_column(("k1", LogicalTypeId::Int32, false)) .push_value_column(("v1", LogicalTypeId::Float32, true)) .build(); let metadata = desc.try_into().unwrap(); - let region = RegionImpl::new(region_name.to_string(), metadata); + let wal = Wal::new(region_id, region_name, Arc::new(NoopLogStore::default())); + let store_dir = TempDir::new("test_new_region") + .unwrap() + .path() + .to_string_lossy() + .to_string(); - let expect_schema = schema_util::new_schema_ref(&[ - ("k1", LogicalTypeId::Int32, false), - (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false), - (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), - ("v1", LogicalTypeId::Float32, true), - ]); + let accessor = Backend::build().root(&store_dir).finish().await.unwrap(); + let object_store = ObjectStore::new(accessor); + let sst_layer = Arc::new(FsAccessLayer::new("/", object_store.clone())); + let manifest = RegionManifest::new(region_id, "/manifest/", object_store); + + let region = RegionImpl::new( + region_id, + region_name.to_string(), + metadata, + wal, + sst_layer, + manifest, + ); + + let expect_schema = schema_util::new_schema_ref( + &[ + ("k1", LogicalTypeId::Int32, false), + (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false), + (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), + ("v1", LogicalTypeId::Float32, true), + ], + Some(1), + ); assert_eq!(region_name, region.name()); assert_eq!(expect_schema, *region.in_memory_metadata().schema()); diff --git a/src/storage/src/region/tests/read_write.rs b/src/storage/src/region/tests/read_write.rs index 4e37d8044c29..7a06c3c5206a 100644 --- a/src/storage/src/region/tests/read_write.rs +++ b/src/storage/src/region/tests/read_write.rs @@ -5,39 +5,71 @@ use std::sync::Arc; use datatypes::prelude::*; use datatypes::type_id::LogicalTypeId; use datatypes::vectors::Int64Vector; 
+use log_store::fs::noop::NoopLogStore; +use object_store::{backend::fs::Backend, ObjectStore}; +use store_api::manifest::Manifest; use store_api::storage::{ consts, Chunk, ChunkReader, PutOperation, ReadContext, Region, RegionMeta, ScanRequest, SequenceNumber, Snapshot, WriteContext, WriteRequest, WriteResponse, }; +use tempdir::TempDir; +use crate::manifest::region::RegionManifest; use crate::region::RegionImpl; +use crate::sst::FsAccessLayer; use crate::test_util::{self, descriptor_util::RegionDescBuilder, write_batch_util}; +use crate::wal::Wal; use crate::write_batch::{PutData, WriteBatch}; /// Create a new region for read/write test -fn new_region_for_rw(enable_version_column: bool) -> RegionImpl { +async fn new_region_for_rw( + store_dir: &str, + enable_version_column: bool, +) -> RegionImpl<NoopLogStore> { + let region_id = 0; let region_name = "region-rw-0"; + let sst_dir = format!("{}/{}/", store_dir, region_name); + let manifest_dir = format!("{}/{}/maniffest/", store_dir, region_name); + let desc = RegionDescBuilder::new(region_name) .enable_version_column(enable_version_column) .push_value_column(("v1", LogicalTypeId::Int64, true)) .build(); let metadata = desc.try_into().unwrap(); - - RegionImpl::new(region_name.to_string(), metadata) + let wal = Wal::new(region_id, region_name, Arc::new(NoopLogStore::default())); + let accessor = Backend::build().root(store_dir).finish().await.unwrap(); + let object_store = ObjectStore::new(accessor); + let sst_layer = Arc::new(FsAccessLayer::new(&sst_dir, object_store.clone())); + let manifest = RegionManifest::new(region_id, &manifest_dir, object_store); + + RegionImpl::new( + region_id, + region_name.to_string(), + metadata, + wal, + sst_layer, + manifest, + ) } fn new_write_batch_for_test(enable_version_column: bool) -> WriteBatch { if enable_version_column { - write_batch_util::new_write_batch(&[ - (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false), - (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), - ("v1", LogicalTypeId::Int64, true), - ]) + write_batch_util::new_write_batch( + &[ + (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false), + (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), + ("v1", LogicalTypeId::Int64, true), + ], + Some(0), + ) } else { - write_batch_util::new_write_batch(&[ - (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false), - ("v1", LogicalTypeId::Int64, true), - ]) + write_batch_util::new_write_batch( + &[ + (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false), + ("v1", LogicalTypeId::Int64, true), + ], + Some(0), + ) } } @@ -73,20 +105,14 @@ fn append_chunk_to(chunk: &Chunk, dst: &mut Vec<(i64, Option<i64>)>) { /// Test region without considering version column. 
struct Tester { - region: RegionImpl, + region: RegionImpl<NoopLogStore>, write_ctx: WriteContext, read_ctx: ReadContext, } -impl Default for Tester { - fn default() -> Tester { - Tester::new() - } -} - impl Tester { - fn new() -> Tester { - let region = new_region_for_rw(false); + async fn new(store_dir: &str) -> Tester { + let region = new_region_for_rw(store_dir, false).await; Tester { region, @@ -134,7 +160,9 @@ impl Tester { #[tokio::test] async fn test_simple_put_scan() { - let tester = Tester::default(); + let dir = TempDir::new("write_parquet").unwrap(); + let store_dir = dir.path().to_str().unwrap(); + let tester = Tester::new(store_dir).await; let data = vec![ (1000, Some(100)), @@ -151,7 +179,9 @@ async fn test_simple_put_scan() { } #[tokio::test] async fn test_sequence_increase() { - let tester = Tester::default(); + let dir = TempDir::new("write_parquet").unwrap(); + let store_dir = dir.path().to_str().unwrap(); + let tester = Tester::new(store_dir).await; let mut committed_sequence = tester.committed_sequence(); for i in 0..100 { diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs index 899da728a1ed..a8f579478fe3 100644 --- a/src/storage/src/region/writer.rs +++ b/src/storage/src/region/writer.rs @@ -1,46 +1,291 @@ -use store_api::storage::{WriteContext, WriteResponse}; +use std::sync::Arc; -use crate::error::Result; -use crate::memtable::{Inserter, MemtableBuilderRef}; -use crate::version::VersionControlRef; +use common_telemetry::logging; +use common_time::RangeMillis; +use snafu::ResultExt; +use store_api::logstore::LogStore; +use store_api::storage::{SequenceNumber, WriteContext, WriteRequest, WriteResponse}; +use tokio::sync::Mutex; + +use crate::background::JobHandle; +use crate::error::{InvalidTimestampSnafu, Result}; +use crate::flush::{FlushJob, FlushSchedulerRef, FlushStrategyRef}; +use crate::memtable::{Inserter, MemtableBuilderRef, MemtableId, MemtableSet}; +use crate::proto::WalHeader; +use crate::region::RegionManifest; +use crate::region::SharedDataRef; +use crate::sst::AccessLayerRef; +use crate::version::{VersionControlRef, VersionEdit}; +use crate::wal::{Payload, Wal}; use crate::write_batch::WriteBatch; +pub type RegionWriterRef = Arc<RegionWriter>; + pub struct RegionWriter { - _memtable_builder: MemtableBuilderRef, + inner: Mutex<WriterInner>, } impl RegionWriter { - pub fn new(_memtable_builder: MemtableBuilderRef) -> RegionWriter { - RegionWriter { _memtable_builder } + pub fn new(memtable_builder: MemtableBuilderRef) -> RegionWriter { + RegionWriter { + inner: Mutex::new(WriterInner::new(memtable_builder)), + } + } + + pub async fn write<S: LogStore>( + &self, + ctx: &WriteContext, + request: WriteBatch, + writer_ctx: WriterContext<'_, S>, + ) -> Result<WriteResponse> { + let mut inner = self.inner.lock().await; + inner.write(ctx, request, writer_ctx).await + } + + pub async fn apply_version_edit<S: LogStore>( + &self, + wal: &Wal<S>, + edit: VersionEdit, + shared: &SharedDataRef, + ) -> Result<()> { + let mut inner = self.inner.lock().await; + inner.apply_version_edit(wal, edit, shared).await + } +} + +pub struct WriterContext<'a, S: LogStore> { + pub shared: &'a SharedDataRef, + pub flush_strategy: &'a FlushStrategyRef, + pub flush_scheduler: &'a FlushSchedulerRef, + pub sst_layer: &'a AccessLayerRef, + pub wal: &'a Wal<S>, + pub writer: &'a RegionWriterRef, + pub manifest: &'a RegionManifest, +} + +impl<'a, S: LogStore> WriterContext<'a, S> { + #[inline] + fn version_control(&self) -> &VersionControlRef { + 
&self.shared.version_control + } +} + +struct WriterInner { + memtable_builder: MemtableBuilderRef, + last_memtable_id: MemtableId, + flush_handle: Option<JobHandle>, +} + +impl WriterInner { + fn new(memtable_builder: MemtableBuilderRef) -> WriterInner { + WriterInner { + memtable_builder, + last_memtable_id: 0, + flush_handle: None, + } } // TODO(yingwen): Support group commit so we can avoid taking mutable reference. /// Write `WriteBatch` to region, now the schema of batch needs to be validated outside. - pub async fn write( + /// + /// Mutable reference of writer ensure no other reference of this writer can modify the + /// version control (write is exclusive). + async fn write<S: LogStore>( &mut self, _ctx: &WriteContext, - version_control: &VersionControlRef, request: WriteBatch, + writer_ctx: WriterContext<'_, S>, ) -> Result<WriteResponse> { - // Mutable reference of writer ensure no other reference of this writer can modify - // the version control (write is exclusive). + let time_ranges = self.preprocess_write(&request, &writer_ctx).await?; // TODO(yingwen): Write wal and get sequence. + let version_control = writer_ctx.version_control(); let version = version_control.current(); - let mem = version.mutable_memtable(); let committed_sequence = version_control.committed_sequence(); // Sequence for current write batch. let next_sequence = committed_sequence + 1; + let wal_header = WalHeader::with_last_manifest_version(version.manifest_version()); + writer_ctx + .wal + .write_to_wal( + next_sequence, + wal_header, + Payload::WriteBatchArrow(&request), + ) + .await?; + // Insert batch into memtable. - let mut inserter = Inserter::new(next_sequence); - inserter.insert_memtable(&request, &**mem)?; + let mut inserter = Inserter::new(next_sequence, time_ranges, version.bucket_duration()); + inserter.insert_memtables(&request, version.mutable_memtables())?; - // Update committed_sequence to make current batch visible. The `&mut self` of RegionWriter + // Update committed_sequence to make current batch visible. The `&mut self` of WriterInner // guarantees the writer is exclusive. version_control.set_committed_sequence(next_sequence); Ok(WriteResponse {}) } + + /// Preprocess before write. + /// + /// Creates needed mutable memtables, ensures there is enough capacity in memtable and trigger + /// flush if necessary. Returns time ranges of the input write batch. + async fn preprocess_write<S: LogStore>( + &mut self, + request: &WriteBatch, + writer_ctx: &WriterContext<'_, S>, + ) -> Result<Vec<RangeMillis>> { + let version_control = writer_ctx.version_control(); + // Check whether memtable is full or flush should be triggered. We need to do this first since + // switching memtables will clear all mutable memtables. + if self.should_flush( + writer_ctx.shared, + version_control, + writer_ctx.flush_strategy, + ) { + self.trigger_flush( + writer_ctx.shared, + writer_ctx.flush_scheduler, + writer_ctx.sst_layer, + writer_ctx.writer, + writer_ctx.wal, + writer_ctx.manifest, + ) + .await?; + } + + let current_version = version_control.current(); + let duration = current_version.bucket_duration(); + let time_ranges = request + .time_ranges(duration) + .context(InvalidTimestampSnafu)?; + let mutable = current_version.mutable_memtables(); + let mut memtables_to_add = MemtableSet::default(); + + // Pre-create all needed mutable memtables. 
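+        // Both the current mutable set and the pending `memtables_to_add` are checked,
+        // so at most one memtable is created per time range for this request.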
+ for range in &time_ranges { + if mutable.get_by_range(range).is_none() + && memtables_to_add.get_by_range(range).is_none() + { + // Memtable for this range is missing, need to create a new memtable. + let memtable_schema = current_version.memtable_schema(); + let id = self.alloc_memtable_id(); + let memtable = self.memtable_builder.build(id, memtable_schema); + memtables_to_add.insert(*range, memtable); + } + } + + if !memtables_to_add.is_empty() { + version_control.add_mutable(memtables_to_add); + } + + Ok(time_ranges) + } + + fn should_flush( + &self, + shared: &SharedDataRef, + version_control: &VersionControlRef, + flush_strategy: &FlushStrategyRef, + ) -> bool { + let current = version_control.current(); + let memtables = current.memtables(); + let mutable_bytes_allocated = memtables.mutable_bytes_allocated(); + let total_bytes_allocated = memtables.total_bytes_allocated(); + flush_strategy.should_flush(shared, mutable_bytes_allocated, total_bytes_allocated) + } + + async fn trigger_flush<S: LogStore>( + &mut self, + shared: &SharedDataRef, + flush_scheduler: &FlushSchedulerRef, + sst_layer: &AccessLayerRef, + writer: &RegionWriterRef, + wal: &Wal<S>, + manifest: &RegionManifest, + ) -> Result<()> { + let version_control = &shared.version_control; + // Freeze all mutable memtables so we can flush them later. + version_control.freeze_mutable(); + + if let Some(flush_handle) = self.flush_handle.take() { + // Previous flush job is incomplete, wait util it is finished (write stall). + // However the last flush job may fail, in which case, we just return error + // and abort current write request. The flush handle is left empty, so the next + // time we still have chance to trigger a new flush. + flush_handle.join().await.map_err(|e| { + logging::error!( + "Previous flush job failed, region: {}, err: {}", + shared.name, + e + ); + e + })?; + } + + let current_version = version_control.current(); + let (max_memtable_id, mem_to_flush) = current_version.memtables().memtables_to_flush(); + + if max_memtable_id.is_none() { + logging::info!("No memtables to flush in region: {}", shared.name); + return Ok(()); + } + + let flush_req = FlushJob { + max_memtable_id: max_memtable_id.unwrap(), + memtables: mem_to_flush, + // In write thread, safe to use current commited sequence. 
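+            // (`WriterInner` is guarded by the region writer's mutex, so no newer
+            // sequence can be committed while this flush job is being built.)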
+ flush_sequence: version_control.committed_sequence(), + shared: shared.clone(), + sst_layer: sst_layer.clone(), + writer: writer.clone(), + wal: wal.clone(), + manifest: manifest.clone(), + }; + + let flush_handle = flush_scheduler.schedule_flush(Box::new(flush_req)).await?; + self.flush_handle = Some(flush_handle); + + Ok(()) + } + + async fn apply_version_edit<S: LogStore>( + &mut self, + wal: &Wal<S>, + edit: VersionEdit, + shared: &SharedDataRef, + ) -> Result<()> { + let version_control = &shared.version_control; + + let next_sequence = version_control.committed_sequence() + 1; + + self.persist_manifest_version(wal, next_sequence, &edit) + .await?; + + version_control.apply_edit(edit); + + version_control.set_committed_sequence(next_sequence); + + Ok(()) + } + + async fn persist_manifest_version<S: LogStore>( + &self, + wal: &Wal<S>, + seq: SequenceNumber, + edit: &VersionEdit, + ) -> Result<()> { + let header = WalHeader::with_last_manifest_version(edit.manifest_version); + + wal.write_to_wal(seq, header, Payload::None).await?; + + Ok(()) + } + + #[inline] + fn alloc_memtable_id(&mut self) -> MemtableId { + self.last_memtable_id += 1; + self.last_memtable_id + } } diff --git a/src/storage/src/snapshot.rs b/src/storage/src/snapshot.rs index 7b89a19a8e0e..3603dc18f194 100644 --- a/src/storage/src/snapshot.rs +++ b/src/storage/src/snapshot.rs @@ -33,13 +33,34 @@ impl Snapshot for SnapshotImpl { request: ScanRequest, ) -> Result<ScanResponse<ChunkReaderImpl>> { let visible_sequence = self.sequence_to_read(request.sequence); + let memtable_version = self.version.memtables(); + + let mutables = memtable_version.mutable_memtables(); + let immutables = memtable_version.immutable_memtables(); + let mut batch_iters = Vec::with_capacity(memtable_version.num_memtables()); - let mem = self.version.mutable_memtable(); let iter_ctx = IterContext { batch_size: ctx.batch_size, visible_sequence, + ..Default::default() }; - let iter = mem.iter(iter_ctx)?; + + for (_range, mem) in mutables.iter() { + let iter = mem.iter(iter_ctx.clone())?; + + batch_iters.push(iter); + } + + for mem_set in immutables { + for (_range, mem) in mem_set.iter() { + let iter = mem.iter(iter_ctx.clone())?; + + batch_iters.push(iter); + } + } + + // Now we just simply chain all iterators together, ignore duplications/ordering. + let iter = Box::new(batch_iters.into_iter().flatten()); let reader = ChunkReaderImpl::new(self.version.schema().clone(), iter); diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs new file mode 100644 index 000000000000..35fb190e992c --- /dev/null +++ b/src/storage/src/sst.rs @@ -0,0 +1,172 @@ +mod parquet; + +use std::sync::Arc; + +use async_trait::async_trait; +use object_store::{util, ObjectStore}; +use serde::{Deserialize, Serialize}; + +use crate::error::Result; +use crate::memtable::BatchIteratorPtr; +use crate::sst::parquet::ParquetWriter; + +/// Maximum level of ssts. +pub const MAX_LEVEL: usize = 1; + +// We only has fixed number of level, so we array to hold elements. This implement +// detail of LevelMetaVec should not be exposed to the user of [LevelMetas]. +type LevelMetaVec = [LevelMeta; MAX_LEVEL]; + +/// Metadata of all ssts under a region. +/// +/// Files are organized into multiple level, though there may be only one level. +#[derive(Debug, Clone)] +pub struct LevelMetas { + levels: LevelMetaVec, +} + +impl LevelMetas { + /// Create a new LevelMetas and initialized each level. 
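+    ///
+    /// Every one of the `MAX_LEVEL` entries starts out empty; files are only added
+    /// later through [`LevelMetas::merge`].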
+ pub fn new() -> LevelMetas { + LevelMetas { + levels: [LevelMeta::default(); MAX_LEVEL], + } + } + + /// Merge `self` with files to add/remove to create a new [LevelMetas]. + /// + /// # Panics + /// Panics if level of [FileHandle] is greater than [MAX_LEVEL]. + pub fn merge(&self, files_to_add: impl Iterator<Item = FileHandle>) -> LevelMetas { + let mut merged = self.clone(); + for file in files_to_add { + let level = file.level_index(); + + merged.levels[level].add_file(file); + } + + // TODO(yingwen): Support file removal. + + merged + } +} + +impl Default for LevelMetas { + fn default() -> LevelMetas { + LevelMetas::new() + } +} + +/// Metadata of files in same sst level. +#[derive(Debug, Default, Clone)] +pub struct LevelMeta { + /// Handles to the files in this level. + // TODO(yingwen): Now for simplicity, files are unordered, maybe sort the files by time range + // or use another structure to hold them. + files: Vec<FileHandle>, +} + +impl LevelMeta { + fn add_file(&mut self, file: FileHandle) { + self.files.push(file); + } +} + +/// In-memory handle to a file. +#[derive(Debug, Clone)] +pub struct FileHandle { + inner: Arc<FileHandleInner>, +} + +impl FileHandle { + pub fn new(meta: FileMeta) -> FileHandle { + FileHandle { + inner: Arc::new(FileHandleInner::new(meta)), + } + } + + /// Returns level as usize so it can be used as index. + #[inline] + pub fn level_index(&self) -> usize { + self.inner.meta.level.into() + } +} + +/// Actually data of [FileHandle]. +/// +/// Contains meta of the file, and other mutable info like metrics. +#[derive(Debug)] +struct FileHandleInner { + meta: FileMeta, +} + +impl FileHandleInner { + fn new(meta: FileMeta) -> FileHandleInner { + FileHandleInner { meta } + } +} + +/// Immutable metadata of a sst file. +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct FileMeta { + pub file_path: String, + /// SST level of the file. + pub level: u8, +} + +#[derive(Debug, Default)] +pub struct WriteOptions { + // TODO(yingwen): [flush] row group size. +} + +/// Sst access layer. +#[async_trait] +pub trait AccessLayer: Send + Sync { + // Writes SST file with given name and returns the full path. + async fn write_sst( + &self, + file_name: &str, + iter: BatchIteratorPtr, + opts: WriteOptions, + ) -> Result<String>; +} + +pub type AccessLayerRef = Arc<dyn AccessLayer>; + +/// Sst access layer based on local file system. +pub struct FsAccessLayer { + sst_dir: String, + object_store: ObjectStore, +} + +impl FsAccessLayer { + pub fn new(sst_dir: &str, object_store: ObjectStore) -> FsAccessLayer { + FsAccessLayer { + sst_dir: util::normalize_dir(sst_dir), + object_store, + } + } + + #[inline] + fn sst_file_path(&self, file_name: &str) -> String { + format!("{}{}", self.sst_dir, file_name) + } +} + +#[async_trait] +impl AccessLayer for FsAccessLayer { + async fn write_sst( + &self, + file_name: &str, + iter: BatchIteratorPtr, + opts: WriteOptions, + ) -> Result<String> { + // Now we only supports parquet format. We may allow caller to specific sst format in + // WriteOptions in the future. + let file_path = self.sst_file_path(file_name); + let writer = ParquetWriter::new(&file_path, iter, self.object_store.clone()); + + writer.write_sst(opts).await?; + Ok(file_path) + } +} diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs new file mode 100644 index 000000000000..8448050a045b --- /dev/null +++ b/src/storage/src/sst/parquet.rs @@ -0,0 +1,263 @@ +//! Parquet sst format. 
+ +use std::collections::HashMap; + +use datatypes::arrow::chunk::Chunk; +use datatypes::arrow::datatypes::{DataType, Field, Schema}; +use datatypes::arrow::io::parquet::write::{ + Compression, Encoding, FileSink, Version, WriteOptions, +}; +use datatypes::prelude::{ConcreteDataType, Vector}; +use datatypes::schema::ColumnSchema; +use futures_util::sink::SinkExt; +use object_store::ObjectStore; +use snafu::ResultExt; +use store_api::storage::consts; + +use crate::error::{FlushIoSnafu, Result, WriteParquetSnafu}; +use crate::memtable::{BatchIteratorPtr, MemtableSchema}; +use crate::metadata::ColumnMetadata; +use crate::sst; + +/// Parquet sst writer. +pub struct ParquetWriter<'a> { + file_name: &'a str, + iter: BatchIteratorPtr, + object_store: ObjectStore, +} + +impl<'a> ParquetWriter<'a> { + pub fn new( + file_name: &'a str, + iter: BatchIteratorPtr, + object_store: ObjectStore, + ) -> ParquetWriter { + ParquetWriter { + file_name, + iter, + object_store, + } + } + + pub async fn write_sst(self, _opts: sst::WriteOptions) -> Result<()> { + self.write_rows(None).await + } + + /// Iterates memtable and writes rows to Parquet file. + /// A chunk of records yielded from each iteration with a size given + /// in config will be written to a single row group. + async fn write_rows(self, extra_meta: Option<HashMap<String, String>>) -> Result<()> { + let schema = memtable_schema_to_arrow_schema(self.iter.schema()); + let object = self.object_store.object(self.file_name); + + // FIXME(hl): writer size is not used in fs backend so just leave it to 0, + // but in s3/azblob backend the Content-Length field of HTTP request is set + // to this value. + let writer = object.writer(0).await.context(FlushIoSnafu)?; + + // now all physical types use plain encoding, maybe let caller to choose encoding for each type. + let encodings = get_encoding_for_schema(&schema, |_| Encoding::Plain); + + let mut sink = FileSink::try_new( + writer, + schema, + encodings, + WriteOptions { + write_statistics: true, + compression: Compression::Gzip, + version: Version::V2, + }, + ) + .context(WriteParquetSnafu)?; + + for batch in self.iter { + let batch = batch?; + sink.send(Chunk::new( + batch + .keys + .iter() + .map(|v| v.to_arrow_array()) + .chain(std::iter::once(batch.sequences.to_arrow_array())) + .chain(std::iter::once(batch.value_types.to_arrow_array())) + .chain(batch.values.iter().map(|v| v.to_arrow_array())) + .collect(), + )) + .await + .context(WriteParquetSnafu)?; + } + + if let Some(meta) = extra_meta { + for (k, v) in meta { + sink.metadata.insert(k, Some(v)); + } + } + sink.close().await.context(WriteParquetSnafu) + } +} + +/// Assembles arrow schema from memtable schema info. 
+fn memtable_schema_to_arrow_schema(schema: &MemtableSchema) -> Schema { + let col_meta_to_field: fn(&ColumnMetadata) -> Field = |col_meta| { + Field::from(&ColumnSchema::new( + col_meta.desc.name.clone(), + col_meta.desc.data_type.clone(), + col_meta.desc.is_nullable, + )) + }; + + let fields = schema + .row_key_columns() + .map(col_meta_to_field) + .chain(std::iter::once(Field::from(&ColumnSchema::new( + consts::SEQUENCE_COLUMN_NAME, + ConcreteDataType::uint64_datatype(), + false, + )))) + .chain(std::iter::once(Field::from(&ColumnSchema::new( + consts::VALUE_TYPE_COLUMN_NAME, + ConcreteDataType::uint8_datatype(), + false, + )))) + .chain(schema.value_columns().map(col_meta_to_field)) + .collect::<Vec<_>>(); + Schema::from(fields) +} + +fn get_encoding_for_schema<F: Fn(&DataType) -> Encoding + Clone>( + schema: &Schema, + map: F, +) -> Vec<Encoding> { + schema + .fields + .iter() + .flat_map(|f| transverse(&f.data_type, map.clone())) + .collect() +} + +// TODO(hl): backport from arrow2 v0.12 (https://github.com/jorgecarleitao/arrow2/blob/f57dbd5dbc61b940a71decd5f81d0fd4c93b158d/src/io/parquet/write/mod.rs#L454-L509) +// remove it when upgrade to newer version +pub fn transverse<T, F: Fn(&DataType) -> T + Clone>(data_type: &DataType, map: F) -> Vec<T> { + let mut encodings = vec![]; + transverse_recursive(data_type, map, &mut encodings); + encodings +} + +fn transverse_recursive<T, F: Fn(&DataType) -> T + Clone>( + data_type: &DataType, + map: F, + encodings: &mut Vec<T>, +) { + use datatypes::arrow::datatypes::PhysicalType::*; + match data_type.to_physical_type() { + Null | Boolean | Primitive(_) | Binary | FixedSizeBinary | LargeBinary | Utf8 + | Dictionary(_) | LargeUtf8 => encodings.push(map(data_type)), + List | FixedSizeList | LargeList => { + let a = data_type.to_logical_type(); + if let DataType::List(inner) = a { + transverse_recursive(&inner.data_type, map, encodings) + } else if let DataType::LargeList(inner) = a { + transverse_recursive(&inner.data_type, map, encodings) + } else if let DataType::FixedSizeList(inner, _) = a { + transverse_recursive(&inner.data_type, map, encodings) + } else { + unreachable!() + } + } + Struct => { + if let DataType::Struct(fields) = data_type.to_logical_type() { + for field in fields { + transverse_recursive(&field.data_type, map.clone(), encodings) + } + } else { + unreachable!() + } + } + Union => todo!(), + Map => todo!(), + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use datatypes::arrow::array::{Array, Int64Array, UInt64Array, UInt8Array}; + use datatypes::arrow::io::parquet::read::FileReader; + use object_store::backend::fs::Backend; + use store_api::storage::ValueType; + use tempdir::TempDir; + + use super::*; + use crate::memtable::tests as memtable_tests; + use crate::memtable::{DefaultMemtableBuilder, IterContext, MemtableBuilder}; + + #[tokio::test] + async fn test_parquet_writer() { + let schema = memtable_tests::schema_for_test(); + let memtable = DefaultMemtableBuilder {}.build(1, schema); + + memtable_tests::write_kvs( + &*memtable, + 10, // sequence + ValueType::Put, + &[ + (1000, 1), + (1000, 2), + (2002, 1), + (2003, 1), + (2003, 5), + (1001, 1), + ], // keys + &[Some(1), Some(2), Some(7), Some(8), Some(9), Some(3)], // values + ); + + let dir = TempDir::new("write_parquet").unwrap(); + let path = dir.path().to_str().unwrap(); + let backend = Backend::build().root(path).finish().await.unwrap(); + let object_store = ObjectStore::new(backend); + let sst_file_name = "test-flush.parquet"; + let iter = 
memtable.iter(IterContext::default()).unwrap(); + let writer = ParquetWriter::new(sst_file_name, iter, object_store); + + writer + .write_sst(sst::WriteOptions::default()) + .await + .unwrap(); + + // verify parquet file + + let reader = std::fs::File::open(dir.path().join(sst_file_name)).unwrap(); + let mut file_reader = FileReader::try_new(reader, None, Some(128), None, None).unwrap(); + + // chunk schema: timestamp, __version, __sequence, __value_type, v1 + let chunk = file_reader.next().unwrap().unwrap(); + assert_eq!(5, chunk.arrays().len()); + + assert_eq!( + Arc::new(Int64Array::from_slice(&[ + 1000, 1000, 1001, 2002, 2003, 2003 + ])) as Arc<dyn Array>, + chunk.arrays()[0] + ); + + assert_eq!( + Arc::new(UInt64Array::from_slice(&[1, 2, 1, 1, 1, 5])) as Arc<dyn Array>, + chunk.arrays()[1] + ); + + assert_eq!( + Arc::new(UInt64Array::from_slice(&[10, 10, 10, 10, 10, 10])) as Arc<dyn Array>, + chunk.arrays()[2] + ); + + assert_eq!( + Arc::new(UInt8Array::from_slice(&[0, 0, 0, 0, 0, 0])) as Arc<dyn Array>, + chunk.arrays()[3] + ); + + assert_eq!( + Arc::new(UInt64Array::from_slice(&[1, 2, 3, 7, 8, 9])) as Arc<dyn Array>, + chunk.arrays()[4] + ); + } +} diff --git a/src/storage/src/test_util/descriptor_util.rs b/src/storage/src/test_util/descriptor_util.rs index b16b8aaf13b7..ad221a3e4e4c 100644 --- a/src/storage/src/test_util/descriptor_util.rs +++ b/src/storage/src/test_util/descriptor_util.rs @@ -1,13 +1,14 @@ use datatypes::prelude::ConcreteDataType; use store_api::storage::{ ColumnDescriptor, ColumnDescriptorBuilder, ColumnFamilyDescriptorBuilder, ColumnId, - RegionDescriptor, RowKeyDescriptorBuilder, + RegionDescriptor, RegionId, RowKeyDescriptorBuilder, }; use crate::test_util::{self, schema_util::ColumnDef}; /// A RegionDescriptor builder for test. pub struct RegionDescBuilder { + id: RegionId, name: String, last_column_id: ColumnId, key_builder: RowKeyDescriptorBuilder, @@ -27,6 +28,7 @@ impl RegionDescBuilder { ); Self { + id: 0, name: name.into(), last_column_id: 2, key_builder, @@ -34,6 +36,11 @@ impl RegionDescBuilder { } } + pub fn id(mut self, id: RegionId) -> Self { + self.id = id; + self + } + // This will reset the row key builder, so should be called before `push_key_column()` // and `enable_version_column()`, or just call after `new()`. 
pub fn timestamp(mut self, column_def: ColumnDef) -> Self { @@ -61,7 +68,7 @@ impl RegionDescBuilder { pub fn build(self) -> RegionDescriptor { RegionDescriptor { - id: 0, + id: self.id, name: self.name, row_key: self.key_builder.build(), default_cf: self.default_cf_builder.build(), diff --git a/src/storage/src/test_util/schema_util.rs b/src/storage/src/test_util/schema_util.rs index 482a90caa198..d99dbb90b740 100644 --- a/src/storage/src/test_util/schema_util.rs +++ b/src/storage/src/test_util/schema_util.rs @@ -6,7 +6,7 @@ use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; /// Column definition: (name, datatype, is_nullable) pub type ColumnDef<'a> = (&'a str, LogicalTypeId, bool); -pub fn new_schema(column_defs: &[ColumnDef]) -> Schema { +pub fn new_schema(column_defs: &[ColumnDef], timestamp_index: Option<usize>) -> Schema { let column_schemas = column_defs .iter() .map(|column_def| { @@ -15,9 +15,13 @@ pub fn new_schema(column_defs: &[ColumnDef]) -> Schema { }) .collect(); - Schema::new(column_schemas) + if let Some(index) = timestamp_index { + Schema::with_timestamp_index(column_schemas, index).unwrap() + } else { + Schema::new(column_schemas) + } } -pub fn new_schema_ref(column_defs: &[ColumnDef]) -> SchemaRef { - Arc::new(new_schema(column_defs)) +pub fn new_schema_ref(column_defs: &[ColumnDef], timestamp_index: Option<usize>) -> SchemaRef { + Arc::new(new_schema(column_defs, timestamp_index)) } diff --git a/src/storage/src/test_util/write_batch_util.rs b/src/storage/src/test_util/write_batch_util.rs index 20f59f0c992e..a594d1138287 100644 --- a/src/storage/src/test_util/write_batch_util.rs +++ b/src/storage/src/test_util/write_batch_util.rs @@ -3,8 +3,8 @@ use store_api::storage::WriteRequest; use crate::test_util::schema_util::{self, ColumnDef}; use crate::write_batch::WriteBatch; -pub fn new_write_batch(column_defs: &[ColumnDef]) -> WriteBatch { - let schema = schema_util::new_schema_ref(column_defs); +pub fn new_write_batch(column_defs: &[ColumnDef], timestamp_index: Option<usize>) -> WriteBatch { + let schema = schema_util::new_schema_ref(column_defs, timestamp_index); WriteBatch::new(schema) } diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs index 6267db5fd4e4..6b5abb4f5e77 100644 --- a/src/storage/src/version.rs +++ b/src/storage/src/version.rs @@ -9,15 +9,26 @@ use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use std::time::Duration; +use store_api::manifest::ManifestVersion; use store_api::storage::{SchemaRef, SequenceNumber}; -use crate::memtable::{MemtableRef, MemtableSet}; +use crate::memtable::{MemtableId, MemtableSchema, MemtableSet, MemtableVersion}; use crate::metadata::{RegionMetadata, RegionMetadataRef}; +use crate::sst::LevelMetas; +use crate::sst::{FileHandle, FileMeta}; use crate::sync::CowCell; +/// Default bucket duration: 2 Hours. +const DEFAULT_BUCKET_DURATION: Duration = Duration::from_secs(3600 * 2); + /// Controls version of in memory state for a region. pub struct VersionControl { + // TODO(yingwen): If all modification to version must acquire the region writer lock first, + // then we may just use ArcSwap to hold version. But some operations may only require the + // version lock, instead of the writer lock, since we can use the version lock the protect + // the read-modify-write of version. version: CowCell<Version>, /// Latest sequence that is committed and visible to user. 
committed_sequence: AtomicU64, @@ -25,7 +36,7 @@ pub struct VersionControl { impl VersionControl { /// Construct a new version control from `metadata`. - pub fn new(metadata: RegionMetadata, memtables: MemtableSet) -> VersionControl { + pub fn new(metadata: RegionMetadata, memtables: MemtableVersion) -> VersionControl { VersionControl { version: CowCell::new(Version::new(metadata, memtables)), committed_sequence: AtomicU64::new(0), @@ -58,34 +69,91 @@ impl VersionControl { // Release ordering should be enough to guarantee sequence is updated at last. self.committed_sequence.store(value, Ordering::Release); } + + /// Add mutable memtables and commit. + /// + /// # Panics + /// See [MemtableVersion::add_mutable](MemtableVersion::add_mutable). + pub fn add_mutable(&self, memtables_to_add: MemtableSet) { + let mut version_to_update = self.version.lock(); + + let memtable_version = version_to_update.memtables(); + let merged = memtable_version.add_mutable(memtables_to_add); + version_to_update.memtables = Arc::new(merged); + + version_to_update.commit(); + } + + /// Freeze all mutable memtables. + pub fn freeze_mutable(&self) { + let mut version_to_update = self.version.lock(); + + let memtable_version = version_to_update.memtables(); + let freezed = memtable_version.freeze_mutable(); + version_to_update.memtables = Arc::new(freezed); + + version_to_update.commit(); + } + + pub fn apply_edit(&self, edit: VersionEdit) { + let mut version_to_update = self.version.lock(); + + if let Some(max_memtable_id) = edit.max_memtable_id { + // Remove flushed memtables + let memtable_version = version_to_update.memtables(); + let removed = memtable_version.remove_immutables(max_memtable_id); + version_to_update.memtables = Arc::new(removed); + } + + version_to_update.apply_edit(edit); + + version_to_update.commit(); + } +} + +#[derive(Debug)] +pub struct VersionEdit { + pub files_to_add: Vec<FileMeta>, + pub flushed_sequence: Option<SequenceNumber>, + pub manifest_version: ManifestVersion, + pub max_memtable_id: Option<MemtableId>, } pub type VersionControlRef = Arc<VersionControl>; pub type VersionRef = Arc<Version>; - -// Get data from version, need to -// 1. acquire version first -// 2. acquire sequence later -// -// Reason: data may flush and some data with old sequence may be removed, so need -// to acquire version at first. +type MemtableVersionRef = Arc<MemtableVersion>; +type LevelMetasRef = Arc<LevelMetas>; /// Version contains metadata and state of region. +#[derive(Clone)] pub struct Version { - /// Metadata of the region. Altering metadata isn't frequent, storing metadata - /// in Arc to allow sharing metadata and reuse metadata when creating a new - /// `Version`. + /// Metadata of the region. + /// + /// Altering metadata isn't frequent, storing metadata in Arc to allow sharing + /// metadata and reuse metadata when creating a new `Version`. metadata: RegionMetadataRef, - memtables: MemtableSet, - // TODO(yingwen): Also need to store last sequence to this version when switching + /// Mutable and immutable memtables. + /// + /// Wrapped in Arc to make clone of `Version` much cheaper. + memtables: MemtableVersionRef, + /// SSTs of the region. + ssts: LevelMetasRef, + /// Inclusive max sequence of flushed data. + flushed_sequence: SequenceNumber, + /// Current version of manifest. + manifest_version: ManifestVersion, + // TODO(yingwen): Maybe also store last sequence to this version when switching // version, so we can know the newest data can read from this version. 
} impl Version { - pub fn new(metadata: RegionMetadata, memtables: MemtableSet) -> Version { + pub fn new(metadata: RegionMetadata, memtables: MemtableVersion) -> Version { Version { metadata: Arc::new(metadata), - memtables, + memtables: Arc::new(memtables), + ssts: Arc::new(LevelMetas::new()), + flushed_sequence: 0, + manifest_version: 0, } } @@ -95,15 +163,47 @@ impl Version { } #[inline] - pub fn mutable_memtable(&self) -> &MemtableRef { - self.memtables.mutable_memtable() + pub fn mutable_memtables(&self) -> &MemtableSet { + self.memtables.mutable_memtables() + } + + pub fn memtables(&self) -> &MemtableVersionRef { + &self.memtables + } + + /// Returns duration used to partition the memtables and ssts by time. + pub fn bucket_duration(&self) -> Duration { + DEFAULT_BUCKET_DURATION + } + + #[inline] + pub fn memtable_schema(&self) -> MemtableSchema { + MemtableSchema::new(self.metadata.columns_row_key.clone()) + } + + pub fn apply_edit(&mut self, edit: VersionEdit) { + let flushed_sequence = edit.flushed_sequence.unwrap_or(self.flushed_sequence); + if self.flushed_sequence < flushed_sequence { + self.flushed_sequence = flushed_sequence; + } + if self.manifest_version < edit.manifest_version { + self.manifest_version = edit.manifest_version; + } + let handles_to_add = edit.files_to_add.into_iter().map(FileHandle::new); + let merged_ssts = self.ssts.merge(handles_to_add); + + self.ssts = Arc::new(merged_ssts); + } + + #[inline] + pub fn manifest_version(&self) -> ManifestVersion { + self.manifest_version } } #[cfg(test)] mod tests { use super::*; - use crate::memtable::{DefaultMemtableBuilder, MemtableBuilder, MemtableSchema}; use crate::test_util::descriptor_util::RegionDescBuilder; fn new_version_control() -> VersionControl { @@ -112,11 +212,7 @@ mod tests { .build(); let metadata: RegionMetadata = desc.try_into().unwrap(); - let schema = MemtableSchema::new(metadata.columns_row_key.clone()); - let memtable = DefaultMemtableBuilder {}.build(schema); - let memtable_set = MemtableSet::new(memtable); - - VersionControl::new(metadata, memtable_set) + VersionControl::new(metadata, MemtableVersion::new()) } #[test] diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs new file mode 100644 index 000000000000..4994b4e1dd04 --- /dev/null +++ b/src/storage/src/wal.rs @@ -0,0 +1,225 @@ +use std::sync::Arc; + +use common_error::prelude::BoxedError; +use prost::Message; +use snafu::ResultExt; +use store_api::{ + logstore::{entry::Entry, namespace::Namespace, AppendResponse, LogStore}, + storage::SequenceNumber, +}; + +use crate::{ + codec::{Decoder, Encoder}, + error::{self, Error, Result}, + proto::{self, PayloadType, WalHeader}, + write_batch::{codec::WriteBatchArrowEncoder, WriteBatch}, +}; + +pub struct Wal<S: LogStore> { + region_id: u32, + namespace: S::Namespace, + store: Arc<S>, +} + +// wal should be cheap to clone +impl<S: LogStore> Clone for Wal<S> { + fn clone(&self) -> Self { + Self { + region_id: self.region_id, + namespace: self.namespace.clone(), + store: self.store.clone(), + } + } +} + +impl<S: LogStore> Wal<S> { + pub fn new(region_id: u32, region_name: impl Into<String>, store: Arc<S>) -> Self { + let region_name = region_name.into(); + let namespace = S::Namespace::new(&region_name, region_id as u64); + + Self { + region_id, + namespace, + store, + } + } + + #[inline] + pub fn region_id(&self) -> u32 { + self.region_id + } + + #[inline] + pub fn name(&self) -> &str { + self.namespace.name() + } +} + +impl<S: LogStore> Wal<S> { + /// Data format: + /// + /// ```text + /// 
| | + /// |--------------------------> Header Len <-----------------------------| Arrow/Protobuf/... encoded + /// | | + /// v v + /// +---------------------+----------------------------------------------------+--------------+-------------+--------------+ + /// | | Header | | | | + /// | Header Len(varint) | (last_manifest_version + mutation_extras + ...) | Data Chunk0 | Data Chunk1 | ... | + /// | | | | | | + /// +---------------------+----------------------------------------------------+--------------+-------------+--------------+ + /// ``` + /// + pub async fn write_to_wal<'a>( + &self, + seq: SequenceNumber, + mut header: WalHeader, + payload: Payload<'a>, + ) -> Result<(u64, usize)> { + header.payload_type = payload.payload_type(); + + if let Payload::WriteBatchArrow(batch) = payload { + header.mutation_extras = proto::gen_mutation_extras(batch); + } + + let mut buf = vec![]; + + // header + let wal_header_encoder = WalHeaderEncoder {}; + wal_header_encoder.encode(&header, &mut buf)?; + + if let Payload::WriteBatchArrow(batch) = payload { + // entry + let encoder = WriteBatchArrowEncoder::new(header.mutation_extras); + // TODO(jiachun): provide some way to compute data size before encode, so we can preallocate an exactly sized buf. + encoder + .encode(batch, &mut buf) + .map_err(BoxedError::new) + .context(error::WriteWalSnafu { + region_id: self.region_id(), + name: self.name(), + })?; + } + + // TODO(jiachun): encode protobuf payload + + // write bytes to wal + self.write(seq, &buf).await + } + + async fn write(&self, seq: SequenceNumber, bytes: &[u8]) -> Result<(u64, usize)> { + let ns = self.namespace.clone(); + let mut e = S::Entry::new(bytes); + e.set_id(seq); + + let res = self + .store + .append(ns, e) + .await + .map_err(BoxedError::new) + .context(error::WriteWalSnafu { + region_id: self.region_id(), + name: self.name(), + })?; + + Ok((res.entry_id(), res.offset())) + } +} + +pub enum Payload<'a> { + None, // only header + WriteBatchArrow(&'a WriteBatch), + WriteBatchProto(&'a WriteBatch), +} + +impl<'a> Payload<'a> { + pub fn payload_type(&self) -> i32 { + match self { + Payload::None => PayloadType::None.into(), + Payload::WriteBatchArrow(_) => PayloadType::WriteBatchArrow.into(), + Payload::WriteBatchProto(_) => PayloadType::WriteBatchProto.into(), + } + } +} + +pub struct WalHeaderEncoder {} + +impl Encoder for WalHeaderEncoder { + type Item = WalHeader; + type Error = Error; + + fn encode(&self, item: &WalHeader, dst: &mut Vec<u8>) -> Result<()> { + item.encode_length_delimited(dst) + .map_err(|err| err.into()) + .context(error::EncodeWalHeaderSnafu) + } +} + +pub struct WalHeaderDecoder {} + +impl Decoder for WalHeaderDecoder { + type Item = (usize, WalHeader); + type Error = Error; + + fn decode(&self, src: &[u8]) -> Result<Option<(usize, WalHeader)>> { + let mut data_pos = prost::decode_length_delimiter(src) + .map_err(|err| err.into()) + .context(error::DecodeWalHeaderSnafu)?; + data_pos += prost::length_delimiter_len(data_pos); + + let wal_header = WalHeader::decode_length_delimited(src) + .map_err(|err| err.into()) + .context(error::DecodeWalHeaderSnafu)?; + + Ok(Some((data_pos, wal_header))) + } +} + +#[cfg(test)] +mod tests { + use log_store::test_util; + + use super::*; + + #[tokio::test] + pub async fn test_write_wal() { + let (log_store, _tmp) = + test_util::log_store_util::create_tmp_local_file_log_store("wal_test").await; + let wal = Wal::new(0, "test_region", Arc::new(log_store)); + + let res = wal.write(0, b"test1").await.unwrap(); + + assert_eq!(0, 
res.0); + assert_eq!(0, res.1); + + let res = wal.write(1, b"test2").await.unwrap(); + + assert_eq!(1, res.0); + assert_eq!(29, res.1); + } + + #[test] + pub fn test_wal_header_codec() { + let wal_header = WalHeader { + payload_type: 1, + last_manifest_version: 99999999, + mutation_extras: vec![], + }; + + let mut buf: Vec<u8> = vec![]; + let wal_encoder = WalHeaderEncoder {}; + wal_encoder.encode(&wal_header, &mut buf).unwrap(); + + buf.push(1u8); // data + buf.push(2u8); // data + buf.push(3u8); // data + + let decoder = WalHeaderDecoder {}; + let res = decoder.decode(&buf).unwrap(); + + assert!(res.is_some()); + + let data_pos = res.unwrap().0; + assert_eq!(buf.len() - 3, data_pos); + } +} diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs index 22e0bf8daadd..69a9aa2781d8 100644 --- a/src/storage/src/write_batch.rs +++ b/src/storage/src/write_batch.rs @@ -1,11 +1,19 @@ -use std::any::Any; -use std::collections::HashMap; -use std::slice; +use std::{ + any::Any, + collections::{BTreeSet, HashMap}, + slice, + time::Duration, +}; use common_error::prelude::*; -use datatypes::data_type::ConcreteDataType; -use datatypes::schema::SchemaRef; -use datatypes::vectors::VectorRef; +use common_time::{RangeMillis, TimestampMillis}; +use datatypes::{ + arrow::error::ArrowError, + data_type::ConcreteDataType, + prelude::ScalarVector, + schema::SchemaRef, + vectors::{Int64Vector, VectorRef}, +}; use snafu::ensure; use store_api::storage::{consts, PutOperation, WriteRequest}; @@ -58,6 +66,42 @@ pub enum Error { num_rows: usize, backtrace: Backtrace, }, + + #[snafu(display("Cannot align timestamp: {}", ts))] + TimestampOverflow { ts: i64 }, + + #[snafu(display("Failed to encode, source: {}", source))] + EncodeArrow { + backtrace: Backtrace, + source: ArrowError, + }, + + #[snafu(display("Failed to decode, source: {}", source))] + DecodeArrow { + backtrace: Backtrace, + source: ArrowError, + }, + + #[snafu(display("Failed to parse schema, source: {}", source))] + ParseSchema { + backtrace: Backtrace, + source: datatypes::error::Error, + }, + + #[snafu(display("Failed to decode, in stream waiting state"))] + StreamWaiting, + + #[snafu(display("Failed to decode, data corruption {}", message))] + DataCorruption { + message: String, + backtrace: Backtrace, + }, + + #[snafu(display("Failed to decode vector, source {}", source))] + DecodeVector { + backtrace: Backtrace, + source: datatypes::error::Error, + }, } pub type Result<T> = std::result::Result<T, Error>; @@ -110,6 +154,57 @@ impl WriteRequest for WriteBatch { Ok(()) } + + /// Aligns timestamps in write batch specified by schema to durations. + /// + /// A negative timestamp means "before Unix epoch". + /// Valid timestamp range is `[i64::MIN + duration, i64::MAX-(i64::MAX%duration))`. 
+ fn time_ranges(&self, duration: Duration) -> Result<Vec<RangeMillis>> { + let ts_col_name = match self.schema.timestamp_column() { + None => { + // write batch does not have a timestamp column + return Ok(Vec::new()); + } + Some(ts_col) => &ts_col.name, + }; + let durations_millis = duration.as_millis() as i64; + let mut aligned_timestamps: BTreeSet<i64> = BTreeSet::new(); + for m in &self.mutations { + match m { + Mutation::Put(put_data) => { + let column = put_data + .column_by_name(ts_col_name) + .unwrap_or_else(|| panic!("Cannot find column by name: {}", ts_col_name)); + + let ts_vector = column.as_any().downcast_ref::<Int64Vector>().unwrap(); // not expected to fail + for ts in ts_vector.iter_data().flatten() { + let aligned = align_timestamp(ts, durations_millis) + .context(TimestampOverflowSnafu { ts })?; + aligned_timestamps.insert(aligned); + } + } + } + } + + let ranges = aligned_timestamps + .iter() + .map(|t| RangeMillis::new(*t, *t + durations_millis).unwrap()) + .collect::<Vec<_>>(); + + Ok(ranges) + } +} + +/// Aligns timestamp to nearest time interval. +/// Negative ts means a timestamp before Unix epoch. +/// If arithmetic overflows, this function returns None. +/// So timestamp within `[i64::MIN, i64::MIN + duration)` or +/// `[i64::MAX-(i64::MAX%duration), i64::MAX]` is not a valid input. +fn align_timestamp(ts: i64, duration: i64) -> Option<i64> { + let aligned = TimestampMillis::new(ts).align_by_bucket(duration)?.as_i64(); + // Also ensure end timestamp won't overflow. + aligned.checked_add(duration)?; + Some(aligned) } // WriteBatch pub methods. @@ -169,6 +264,11 @@ impl PutData { self.columns.get(name) } + /// Returns number of columns in data. + pub fn num_columns(&self) -> usize { + self.columns.len() + } + /// Returns number of rows in data. pub fn num_rows(&self) -> usize { self.columns @@ -184,6 +284,22 @@ impl PutData { pub fn is_empty(&self) -> bool { self.num_rows() == 0 } + + /// Returns slice of [PutData] in range `[start, end)`. + /// + /// # Panics + /// Panics if `start > end`. + pub fn slice(&self, start: usize, end: usize) -> PutData { + assert!(start <= end); + + let columns = self + .columns + .iter() + .map(|(k, v)| (k.clone(), v.slice(start, end - start))) + .collect(); + + PutData { columns } + } } impl WriteBatch { @@ -273,15 +389,253 @@ impl PutData { } } +pub mod codec { + use std::{io::Cursor, sync::Arc}; + + use common_error::prelude::*; + use datatypes::{ + arrow::{ + chunk::Chunk as ArrowChunk, + io::ipc::{ + self, + read::{self, StreamState}, + write::{StreamWriter, WriteOptions}, + }, + }, + error::Result as DataTypesResult, + schema::Schema, + vectors::Helper, + }; + use snafu::ensure; + use store_api::storage::{PutOperation, WriteRequest}; + + use super::{ + DataCorruptionSnafu, DecodeArrowSnafu, DecodeVectorSnafu, EncodeArrowSnafu, + Error as WriteBatchError, Mutation, ParseSchemaSnafu, Result, WriteBatch, + }; + use crate::{ + arrow_stream::ArrowStreamReader, + codec::{Decoder, Encoder}, + }; + use crate::{ + proto::{MutationExtra, MutationType}, + write_batch::PutData, + }; + + // TODO(jiachun): The codec logic is too complex, maybe we should use protobuf to + // serialize/deserialize all our data. 
+ // And we can make a comparison with protobuf, including performance, storage cost, + // CPU consumption, etc + pub struct WriteBatchArrowEncoder { + mutation_extras: Vec<MutationExtra>, + } + + impl WriteBatchArrowEncoder { + pub fn new(mutation_extras: Vec<MutationExtra>) -> Self { + Self { mutation_extras } + } + } + + impl Encoder for WriteBatchArrowEncoder { + type Item = WriteBatch; + type Error = WriteBatchError; + + fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> { + let schema = item.schema().arrow_schema(); + + let column_names = item + .schema() + .column_schemas() + .iter() + .map(|column_schema| column_schema.name.clone()) + .collect::<Vec<_>>(); + + let data = item + .iter() + .zip(self.mutation_extras.iter()) + .map(|(mtn, ext)| match mtn { + Mutation::Put(put) => { + let arrays = column_names + .iter() + .filter_map(|column_name| put.column_by_name(column_name)) + .map(|vector| vector.to_arrow_array()) + .collect::<Vec<_>>(); + + (arrays, &ext.column_null_mask) + } + }); + + let opts = WriteOptions { compression: None }; + let mut writer = StreamWriter::new(dst, opts); + let ipc_fields = ipc::write::default_ipc_fields(&schema.fields); + writer + .start(schema, Some(ipc_fields.clone())) + .context(EncodeArrowSnafu)?; + for (arrays, column_null_mask) in data { + let chunk = ArrowChunk::try_new(arrays).context(EncodeArrowSnafu)?; + if column_null_mask.is_empty() { + writer.write(&chunk, None).context(EncodeArrowSnafu)?; + } else { + let valid_ipc_fields = ipc_fields + .iter() + .zip(bit_vec::BitVec::from_bytes(column_null_mask)) + .filter(|(_, mask)| !*mask) + .map(|(ipc_field, _)| ipc_field.clone()) + .collect::<Vec<_>>(); + writer + .write(&chunk, Some(&valid_ipc_fields)) + .context(EncodeArrowSnafu)?; + } + } + writer.finish().context(EncodeArrowSnafu)?; + + Ok(()) + } + } + + pub struct WriteBatchArrowDecoder { + mutation_extras: Vec<MutationExtra>, + } + + impl WriteBatchArrowDecoder { + #[allow(dead_code)] + pub fn new(mutation_extras: Vec<MutationExtra>) -> Self { + Self { mutation_extras } + } + } + + impl Decoder for WriteBatchArrowDecoder { + type Item = WriteBatch; + type Error = WriteBatchError; + + fn decode(&self, src: &[u8]) -> Result<Option<WriteBatch>> { + let mut reader = Cursor::new(src); + let metadata = read::read_stream_metadata(&mut reader).context(DecodeArrowSnafu)?; + let mut reader = ArrowStreamReader::new(reader, metadata); + let schema = reader.metadata().schema.clone(); + + let stream_states = self + .mutation_extras + .iter() + .map(|ext| { + reader + .maybe_next(&ext.column_null_mask) + .context(DecodeArrowSnafu) + }) + .collect::<Result<Vec<_>>>()?; + + // check if exactly finished + ensure!( + reader.check_exactly_finished().context(DecodeArrowSnafu)?, + DataCorruptionSnafu { + message: "Impossible, the num of data chunks is different than expected." 
+ } + ); + + let mut chunks = Vec::with_capacity(self.mutation_extras.len()); + + for state_opt in stream_states { + match state_opt { + Some(s) => match s { + StreamState::Some(chunk) => chunks.push(chunk), + StreamState::Waiting => return Err(WriteBatchError::StreamWaiting), + }, + None => (), + } + } + + // chunks -> mutations + let chunks = chunks + .iter() + .map(|chunk| chunk.arrays()) + .map(|arrays| { + arrays + .iter() + .map(Helper::try_into_vector) + .collect::<DataTypesResult<Vec<_>>>() + .context(DecodeVectorSnafu) + }) + .collect::<Result<Vec<_>>>()?; + + ensure!( + chunks.len() == self.mutation_extras.len(), + DataCorruptionSnafu { + message: &format!( + "expected {} mutations, but got {}", + self.mutation_extras.len(), + chunks.len() + ) + } + ); + + let schema = Schema::try_from(Arc::new(schema)).context(ParseSchemaSnafu)?; + + let column_names = schema + .column_schemas() + .iter() + .map(|column| column.name.clone()) + .collect::<Vec<_>>(); + + let mutations = self + .mutation_extras + .iter() + .zip(chunks.iter()) + .map(|(ext, mtn)| match ext.mutation_type { + x if x == MutationType::Put as i32 => { + let valid_column_names = if ext.column_null_mask.is_empty() { + column_names.clone() + } else { + bit_vec::BitVec::from_bytes(&ext.column_null_mask) + .iter() + .zip(column_names.iter()) + .filter(|(mask, _)| !*mask) + .map(|(_, column_name)| column_name.clone()) + .collect::<Vec<_>>() + }; + + let mut put_data = PutData::with_num_columns(valid_column_names.len()); + + let res = valid_column_names + .iter() + .zip(mtn) + .map(|(name, vector)| put_data.add_column_by_name(name, vector.clone())) + .collect::<Result<Vec<_>>>(); + + res.map(|_| Mutation::Put(put_data)) + } + x if x == MutationType::Delete as i32 => { + todo!() + } + _ => { + unreachable!() + } + }) + .collect::<Result<Vec<_>>>()?; + + let mut write_batch = WriteBatch::new(Arc::new(schema)); + + mutations + .into_iter() + .try_for_each(|mutation| match mutation { + Mutation::Put(put_data) => write_batch.put(put_data), + })?; + + Ok(Some(write_batch)) + } + } +} + #[cfg(test)] mod tests { use std::iter; use std::sync::Arc; use datatypes::type_id::LogicalTypeId; - use datatypes::vectors::{BooleanVector, Int32Vector, UInt64Vector}; + use datatypes::vectors::{BooleanVector, Int32Vector, Int64Vector, UInt64Vector}; use super::*; + use crate::codec::{Decoder, Encoder}; + use crate::proto; use crate::test_util::write_batch_util; #[test] @@ -320,22 +674,28 @@ mod tests { } fn new_test_batch() -> WriteBatch { - write_batch_util::new_write_batch(&[ - ("k1", LogicalTypeId::UInt64, false), - (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), - ("v1", LogicalTypeId::Boolean, true), - ]) + write_batch_util::new_write_batch( + &[ + ("k1", LogicalTypeId::UInt64, false), + (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false), + ("ts", LogicalTypeId::Int64, false), + ("v1", LogicalTypeId::Boolean, true), + ], + Some(2), + ) } #[test] fn test_write_batch_put() { let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])); let boolv = Arc::new(BooleanVector::from(vec![true, false, true])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); let mut put_data = PutData::new(); put_data.add_key_column("k1", intv.clone()).unwrap(); put_data.add_version_column(intv).unwrap(); put_data.add_value_column("v1", boolv).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); let mut batch = new_test_batch(); assert!(batch.is_empty()); @@ -362,7 +722,8 @@ mod tests { let mut put_data = PutData::new(); 
put_data.add_key_column("k1", boolv).unwrap(); - let mut batch = write_batch_util::new_write_batch(&[("k1", LogicalTypeId::Boolean, false)]); + let mut batch = + write_batch_util::new_write_batch(&[("k1", LogicalTypeId::Boolean, false)], None); let err = batch.put(put_data).err().unwrap(); check_err(err, "Request is too large"); } @@ -391,9 +752,11 @@ mod tests { #[test] fn test_put_type_mismatch() { let boolv = Arc::new(BooleanVector::from(vec![true, false, true])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); let mut put_data = PutData::new(); put_data.add_key_column("k1", boolv).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); let mut batch = new_test_batch(); let err = batch.put(put_data).err().unwrap(); @@ -403,9 +766,11 @@ mod tests { #[test] fn test_put_type_has_null() { let intv = Arc::new(UInt64Vector::from_iter(&[Some(1), None, Some(3)])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); let mut put_data = PutData::new(); put_data.add_key_column("k1", intv).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); let mut batch = new_test_batch(); let err = batch.put(put_data).err().unwrap(); @@ -415,10 +780,11 @@ mod tests { #[test] fn test_put_missing_column() { let boolv = Arc::new(BooleanVector::from(vec![true, false, true])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); let mut put_data = PutData::new(); put_data.add_key_column("v1", boolv).unwrap(); - + put_data.add_key_column("ts", tsv).unwrap(); let mut batch = new_test_batch(); let err = batch.put(put_data).err().unwrap(); check_err(err, "Missing column k1"); @@ -427,16 +793,125 @@ mod tests { #[test] fn test_put_unknown_column() { let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); let boolv = Arc::new(BooleanVector::from(vec![true, false, true])); let mut put_data = PutData::new(); put_data.add_key_column("k1", intv.clone()).unwrap(); put_data.add_version_column(intv).unwrap(); put_data.add_value_column("v1", boolv.clone()).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); put_data.add_value_column("v2", boolv).unwrap(); - let mut batch = new_test_batch(); let err = batch.put(put_data).err().unwrap(); check_err(err, "Unknown column v2"); } + + #[test] + pub fn test_align_timestamp() { + let duration_millis = 20; + let ts = [-21, -20, -19, -1, 0, 5, 15, 19, 20, 21]; + let res = ts.map(|t| align_timestamp(t, duration_millis)); + assert_eq!(res, [-40, -20, -20, -20, 0, 0, 0, 0, 20, 20].map(Some)); + } + + #[test] + pub fn test_align_timestamp_overflow() { + assert_eq!(Some(i64::MIN), align_timestamp(i64::MIN, 1)); + assert_eq!(None, align_timestamp(i64::MIN, 2)); + assert_eq!( + Some(((i64::MIN + 20) / 20 - 1) * 20), + align_timestamp(i64::MIN + 20, 20) + ); + assert_eq!(None, align_timestamp(i64::MAX - (i64::MAX % 23), 23)); + assert_eq!( + Some(9223372036854775780), + align_timestamp(i64::MAX / 20 * 20 - 1, 20) + ); + } + + #[test] + pub fn test_write_batch_time_range() { + let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3, 4, 5, 6])); + let tsv = Arc::new(Int64Vector::from_vec(vec![-21, -20, -1, 0, 1, 20])); + let boolv = Arc::new(BooleanVector::from(vec![ + true, false, true, false, false, false, + ])); + + let mut put_data = PutData::new(); + put_data.add_key_column("k1", intv.clone()).unwrap(); + put_data.add_version_column(intv).unwrap(); + put_data.add_value_column("v1", boolv).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); + + let mut batch = new_test_batch(); + 
batch.put(put_data).unwrap(); + + let duration_millis = 20i64; + let ranges = batch + .time_ranges(Duration::from_millis(duration_millis as u64)) + .unwrap(); + assert_eq!( + [-40, -20, 0, 20].map(|v| RangeMillis::new(v, v + duration_millis).unwrap()), + ranges.as_slice() + ) + } + + #[test] + fn test_codec() -> Result<()> { + let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])); + let boolv = Arc::new(BooleanVector::from(vec![Some(true), Some(false), None])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); + + let mut put_data = PutData::new(); + put_data.add_key_column("k1", intv.clone()).unwrap(); + put_data.add_version_column(intv).unwrap(); + put_data.add_value_column("v1", boolv).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); + + let mut batch = new_test_batch(); + assert!(batch.is_empty()); + batch.put(put_data).unwrap(); + assert!(!batch.is_empty()); + + let encoder = codec::WriteBatchArrowEncoder::new(proto::gen_mutation_extras(&batch)); + let mut dst = vec![]; + let result = encoder.encode(&batch, &mut dst); + assert!(result.is_ok()); + + let decoder = codec::WriteBatchArrowDecoder::new(proto::gen_mutation_extras(&batch)); + let result = decoder.decode(&dst); + let batch2 = result?.unwrap(); + assert_eq!(batch.num_rows, batch2.num_rows); + + Ok(()) + } + + #[test] + fn test_codec_with_none_column() -> Result<()> { + let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])); + let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0])); + + let mut put_data = PutData::new(); + put_data.add_key_column("k1", intv.clone()).unwrap(); + put_data.add_version_column(intv).unwrap(); + put_data.add_key_column("ts", tsv).unwrap(); + + let mut batch = new_test_batch(); + assert!(batch.is_empty()); + batch.put(put_data).unwrap(); + assert!(!batch.is_empty()); + + let encoder = codec::WriteBatchArrowEncoder::new(proto::gen_mutation_extras(&batch)); + let mut dst = vec![]; + let result = encoder.encode(&batch, &mut dst); + assert!(result.is_ok()); + + let decoder = codec::WriteBatchArrowDecoder::new(proto::gen_mutation_extras(&batch)); + let result = decoder.decode(&dst); + let batch2 = result?.unwrap(); + assert_eq!(batch.num_rows, batch2.num_rows); + + Ok(()) + } } diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml index 4c1ba3846b01..e2bb64282af3 100644 --- a/src/store-api/Cargo.toml +++ b/src/store-api/Cargo.toml @@ -10,8 +10,11 @@ async-trait = "0.1" bytes = "1.1" common-base = { path = "../common/base" } common-error = { path = "../common/error" } +common-time = { path = "../common/time" } datatypes = { path = "../datatypes" } futures = "0.3" +object-store = { path = "../object-store" } +serde = { version = "1.0", features = ["derive"] } snafu = { version = "0.7", features = ["backtraces"] } [dev-dependencies] diff --git a/src/store-api/src/lib.rs b/src/store-api/src/lib.rs index d1efe4c28041..2ab02d5077a8 100644 --- a/src/store-api/src/lib.rs +++ b/src/store-api/src/lib.rs @@ -1,4 +1,5 @@ //! Storage related APIs pub mod logstore; +pub mod manifest; pub mod storage; diff --git a/src/store-api/src/logstore.rs b/src/store-api/src/logstore.rs index b0992e684fe0..af1f874922a5 100644 --- a/src/store-api/src/logstore.rs +++ b/src/store-api/src/logstore.rs @@ -12,8 +12,8 @@ pub mod namespace; /// `LogStore` serves as a Write-Ahead-Log for storage engine. 
#[async_trait::async_trait] -pub trait LogStore { - type Error: ErrorExt + Send + Sync; +pub trait LogStore: Send + Sync + 'static { + type Error: ErrorExt + Send + Sync + 'static; type Namespace: Namespace; type Entry: Entry; type AppendResponse: AppendResponse; diff --git a/src/store-api/src/logstore/namespace.rs b/src/store-api/src/logstore/namespace.rs index 9d1f7b3f9405..1b1919c7c0f7 100644 --- a/src/store-api/src/logstore/namespace.rs +++ b/src/store-api/src/logstore/namespace.rs @@ -1,3 +1,5 @@ pub trait Namespace: Send + Sync + Clone { + fn new(name: &str, id: u64) -> Self; + fn name(&self) -> &str; } diff --git a/src/store-api/src/manifest.rs b/src/store-api/src/manifest.rs new file mode 100644 index 000000000000..b154f38cee8d --- /dev/null +++ b/src/store-api/src/manifest.rs @@ -0,0 +1,45 @@ +//! metadata service +mod storage; + +use async_trait::async_trait; +use common_error::ext::ErrorExt; +use object_store::ObjectStore; +use serde::{de::DeserializeOwned, Serialize}; +pub use storage::*; + +pub type ManifestVersion = u64; +pub const MIN_VERSION: u64 = 0; +pub const MAX_VERSION: u64 = u64::MAX; + +pub trait Metadata: Clone {} + +pub trait MetadataId: Clone + Copy {} + +/// The action to apply on metadata +pub trait MetaAction: Serialize + DeserializeOwned { + type MetadataId: MetadataId; + + /// Returns the metadata id of the action + fn metadata_id(&self) -> Self::MetadataId; +} + +/// Manifest service +#[async_trait] +pub trait Manifest: Send + Sync + Clone + 'static { + type Error: ErrorExt + Send + Sync; + type MetaAction: MetaAction; + type MetadataId: MetadataId; + type Metadata: Metadata; + + fn new(id: Self::MetadataId, manifest_dir: &str, object_store: ObjectStore) -> Self; + + /// Update metadata by the action + async fn update(&self, action: Self::MetaAction) -> Result<ManifestVersion, Self::Error>; + + /// Retrieve the latest metadata + async fn load(&self) -> Result<Option<Self::Metadata>, Self::Error>; + + async fn checkpoint(&self) -> Result<ManifestVersion, Self::Error>; + + fn metadata_id(&self) -> Self::MetadataId; +} diff --git a/src/store-api/src/manifest/storage.rs b/src/store-api/src/manifest/storage.rs new file mode 100644 index 000000000000..4ac7bec50ce5 --- /dev/null +++ b/src/store-api/src/manifest/storage.rs @@ -0,0 +1,41 @@ +use async_trait::async_trait; +use common_error::ext::ErrorExt; + +use crate::manifest::ManifestVersion; + +#[async_trait] +pub trait LogIterator: Send + Sync { + type Error: ErrorExt + Send + Sync; + + async fn next_log(&mut self) -> Result<Option<(ManifestVersion, Vec<u8>)>, Self::Error>; +} + +#[async_trait] +pub trait ManifestLogStorage { + type Error: ErrorExt + Send + Sync; + type Iter: LogIterator<Error = Self::Error>; + + /// Scan the logs in [start, end) + async fn scan( + &self, + start: ManifestVersion, + end: ManifestVersion, + ) -> Result<Self::Iter, Self::Error>; + + /// Save a log + async fn save(&self, version: ManifestVersion, bytes: &[u8]) -> Result<(), Self::Error>; + + /// Delete logs in [start, end) + async fn delete(&self, start: ManifestVersion, end: ManifestVersion) + -> Result<(), Self::Error>; + + /// Save a checkpoint + async fn save_checkpoint( + &self, + version: ManifestVersion, + bytes: &[u8], + ) -> Result<(), Self::Error>; + + /// Load the latest checkpoint + async fn load_checkpoint(&self) -> Result<Option<(ManifestVersion, Vec<u8>)>, Self::Error>; +} diff --git a/src/store-api/src/storage/consts.rs b/src/store-api/src/storage/consts.rs index 5863d3cb8f1d..54b2e8623692 100644 --- 
a/src/store-api/src/storage/consts.rs +++ b/src/store-api/src/storage/consts.rs @@ -29,6 +29,12 @@ pub const VERSION_COLUMN_NAME: &str = "__version"; // Names for default column family. pub const DEFAULT_CF_NAME: &str = "default"; +// Name for reserved column: sequence +pub const SEQUENCE_COLUMN_NAME: &str = "__sequence"; + +// Name for reserved column: value_type +pub const VALUE_TYPE_COLUMN_NAME: &str = "__value_type"; + // ----------------------------------------------------------------------------- // ---------- Default options -------------------------------------------------- diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs index c10e8b81b636..6fbe5910db05 100644 --- a/src/store-api/src/storage/descriptors.rs +++ b/src/store-api/src/storage/descriptors.rs @@ -1,5 +1,7 @@ use datatypes::value::Value; +use serde::{Deserialize, Serialize}; +use crate::manifest::MetadataId; use crate::storage::{consts, ColumnSchema, ConcreteDataType}; /// Id of column, unique in each region. @@ -7,6 +9,7 @@ pub type ColumnId = u32; /// Id of column family, unique in each region. pub type ColumnFamilyId = u32; pub type RegionId = u32; +impl MetadataId for RegionId {} /// Default region name prefix pub const REGION_PREFIX: &str = "r_"; @@ -17,7 +20,7 @@ pub fn gen_region_name(id: RegionId) -> String { // TODO(yingwen): Validate default value has same type with column, and name is a valid column name. /// A [ColumnDescriptor] contains information to create a column. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ColumnDescriptor { pub id: ColumnId, pub name: String, @@ -131,7 +134,7 @@ impl RowKeyDescriptorBuilder { Self { columns: Vec::new(), timestamp, - enable_version_column: true, + enable_version_column: false, } } @@ -254,7 +257,7 @@ mod tests { let desc = RowKeyDescriptorBuilder::new(timestamp.clone()).build(); assert!(desc.columns.is_empty()); - assert!(desc.enable_version_column); + assert!(!desc.enable_version_column); let desc = RowKeyDescriptorBuilder::new(timestamp.clone()) .columns_capacity(1) @@ -266,7 +269,7 @@ mod tests { ) .build(); assert_eq!(2, desc.columns.len()); - assert!(desc.enable_version_column); + assert!(!desc.enable_version_column); let desc = RowKeyDescriptorBuilder::new(timestamp) .enable_version_column(false) diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs index 8d50f8aede6f..e328906a43d0 100644 --- a/src/store-api/src/storage/requests.rs +++ b/src/store-api/src/storage/requests.rs @@ -1,4 +1,7 @@ +use std::time::Duration; + use common_error::ext::ErrorExt; +use common_time::RangeMillis; use datatypes::schema::SchemaRef; use datatypes::vectors::VectorRef; @@ -12,6 +15,11 @@ pub trait WriteRequest: Send { fn new(schema: SchemaRef) -> Self; fn put(&mut self, put: Self::PutOp) -> Result<(), Self::Error>; + + /// Returns all possible time ranges that contain the timestamp in this batch. + /// + /// Each time range is aligned to given `duration`. + fn time_ranges(&self, duration: Duration) -> Result<Vec<RangeMillis>, Self::Error>; } /// Put multiple rows. 
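The `WriteRequest::time_ranges` contract added above (every timestamp in a batch is covered by a bucket-aligned range of length `duration`) rests on the floor-style alignment that `align_timestamp` documents and tests earlier in this diff. Below is a minimal standalone sketch of that alignment rule, assuming i64 millisecond timestamps and a positive bucket size; the function name is illustrative, and the real code goes through `TimestampMillis::align_by_bucket`, which additionally rejects values at the extreme ends of the i64 range.

// Illustrative sketch only, not the crate's API.
fn align_to_bucket(ts: i64, bucket_ms: i64) -> Option<i64> {
    // Euclidean division floors toward negative infinity, so a negative
    // timestamp rounds down to the older bucket (e.g. -21 with a 20ms
    // bucket maps to -40), matching the behaviour exercised in the tests.
    let aligned = ts.checked_div_euclid(bucket_ms)?.checked_mul(bucket_ms)?;
    // Reject inputs whose bucket end would overflow i64, mirroring the
    // overflow checks in the diff.
    aligned.checked_add(bucket_ms)?;
    Some(aligned)
}

fn main() {
    assert_eq!(align_to_bucket(-21, 20), Some(-40));
    assert_eq!(align_to_bucket(-20, 20), Some(-20));
    assert_eq!(align_to_bucket(19, 20), Some(0));
    assert_eq!(align_to_bucket(20, 20), Some(20));
    // A bucket whose end would exceed i64::MAX is not a valid input.
    assert_eq!(align_to_bucket(i64::MAX - (i64::MAX % 23), 23), None);
}
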
diff --git a/src/table-engine/Cargo.toml b/src/table-engine/Cargo.toml index 0f3872894240..a35e275a9091 100644 --- a/src/table-engine/Cargo.toml +++ b/src/table-engine/Cargo.toml @@ -14,6 +14,7 @@ common-telemetry = {path = "../common/telemetry" } datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2"} datatypes = { path = "../datatypes" } futures = "0.3" +log-store = { path = "../log-store" } snafu = { version = "0.7", features = ["backtraces"] } storage ={ path = "../storage" } store-api ={ path = "../store-api" } @@ -21,4 +22,5 @@ table = { path = "../table" } [dev-dependencies] datatypes = { path = "../datatypes" } -tokio = { version = "1.18", features = ["full"] } \ No newline at end of file +tempdir = "0.3" +tokio = { version = "1.18", features = ["full"] } diff --git a/src/table-engine/src/engine.rs b/src/table-engine/src/engine.rs index a22f7251f19b..fba67bc597d9 100644 --- a/src/table-engine/src/engine.rs +++ b/src/table-engine/src/engine.rs @@ -194,8 +194,8 @@ mod tests { use crate::table::test; #[tokio::test] - async fn test_creat_table_insert_scan() { - let (_engine, table, schema) = test::setup_test_engine_and_table().await; + async fn test_create_table_insert_scan() { + let (_engine, table, schema, _dir) = test::setup_test_engine_and_table().await; assert_eq!(TableType::Base, table.table_type()); assert_eq!(schema, table.schema()); diff --git a/src/table-engine/src/table/test.rs b/src/table-engine/src/table/test.rs index b0793aa0820d..418de6c2db1a 100644 --- a/src/table-engine/src/table/test.rs +++ b/src/table-engine/src/table/test.rs @@ -3,14 +3,23 @@ use std::sync::Arc; use datatypes::prelude::ConcreteDataType; use datatypes::schema::SchemaRef; use datatypes::schema::{ColumnSchema, Schema}; +use log_store::fs::noop::NoopLogStore; +use storage::config::EngineConfig; use storage::EngineImpl; -use table::engine::{EngineContext, TableEngine}; +use table::engine::EngineContext; +use table::engine::TableEngine; use table::requests::CreateTableRequest; use table::TableRef; +use tempdir::TempDir; use crate::engine::MitoEngine; -pub async fn setup_test_engine_and_table() -> (MitoEngine<EngineImpl>, TableRef, SchemaRef) { +pub async fn setup_test_engine_and_table() -> ( + MitoEngine<EngineImpl<NoopLogStore>>, + TableRef, + SchemaRef, + TempDir, +) { let column_schemas = vec![ ColumnSchema::new("host", ConcreteDataType::string_datatype(), false), ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), true), @@ -18,10 +27,22 @@ pub async fn setup_test_engine_and_table() -> (MitoEngine<EngineImpl>, TableRef, ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true), ]; - let table_engine = MitoEngine::<EngineImpl>::new(EngineImpl::new()); + let dir = TempDir::new("setup_test_engine_and_table").unwrap(); + let store_dir = dir.path().to_string_lossy(); + + let table_engine = MitoEngine::<EngineImpl<NoopLogStore>>::new( + EngineImpl::new( + EngineConfig::with_store_dir(&store_dir), + Arc::new(NoopLogStore::default()), + ) + .await + .unwrap(), + ); let table_name = "demo"; - let schema = Arc::new(Schema::new(column_schemas)); + let schema = Arc::new( + Schema::with_timestamp_index(column_schemas, 1).expect("ts must be timestamp column"), + ); let table = table_engine .create_table( &EngineContext::default(), @@ -34,5 +55,5 @@ pub async fn setup_test_engine_and_table() -> (MitoEngine<EngineImpl>, TableRef, .await .unwrap(); - (table_engine, table, schema) + (table_engine, table, schema, dir) }
feat
Prototype of the storage engine (#107)
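A note on the prototype above: writes, flushes, and scans all assume that mutable memtables are partitioned by bucket-aligned time ranges, with a memtable created lazily for any aligned range that does not have one yet (the loop over `time_ranges` near the top of this diff). The following is a rough sketch of that idea under stand-in types; `Memtable`, `MutableMemtables`, and the map keyed by bucket start are illustrative, not the crate's `MemtableSet`/`RangeMillis` structures.

use std::collections::BTreeMap;

// Stand-in memtable: just collects (timestamp, value) rows.
#[derive(Default, Debug)]
struct Memtable {
    rows: Vec<(i64, String)>,
}

#[derive(Default)]
struct MutableMemtables {
    // Keyed by inclusive bucket start in milliseconds.
    by_bucket: BTreeMap<i64, Memtable>,
}

impl MutableMemtables {
    fn write(&mut self, ts: i64, value: String, bucket_ms: i64) {
        // Floor the timestamp to its bucket start, then create the
        // memtable for that bucket on first use.
        let bucket_start = ts.div_euclid(bucket_ms) * bucket_ms;
        self.by_bucket
            .entry(bucket_start)
            .or_default()
            .rows
            .push((ts, value));
    }
}

fn main() {
    let mut tables = MutableMemtables::default();
    for (ts, v) in [(-21, "a"), (5, "b"), (19, "c"), (25, "d")] {
        tables.write(ts, v.to_string(), 20);
    }
    // Buckets: -40 (for -21), 0 (for 5 and 19), 20 (for 25).
    assert_eq!(tables.by_bucket.len(), 3);
}
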
a2b262ebc0d6c4002d875af2265eebc09d6dc965
2023-03-31 16:07:52
localhost
chore: add http metrics server in datanode node when greptime start in distributed mode (#1256)
false
diff --git a/Cargo.lock b/Cargo.lock index 29bdceace77f..af62adce236b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4136,6 +4136,7 @@ dependencies = [ "regex", "serde", "serde_json", + "servers", "snafu", "table", "tokio", diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs index 27492982f8d3..f7ae79911a77 100644 --- a/src/cmd/src/datanode.rs +++ b/src/cmd/src/datanode.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::time::Duration; + use clap::Parser; use common_telemetry::logging; use datanode::datanode::{ @@ -86,6 +88,10 @@ struct StartCommand { wal_dir: Option<String>, #[clap(long)] procedure_dir: Option<String>, + #[clap(long)] + http_addr: Option<String>, + #[clap(long)] + http_timeout: Option<u64>, } impl StartCommand { @@ -155,6 +161,12 @@ impl TryFrom<StartCommand> for DatanodeOptions { if let Some(procedure_dir) = cmd.procedure_dir { opts.procedure = Some(ProcedureConfig::from_file_path(procedure_dir)); } + if let Some(http_addr) = cmd.http_addr { + opts.http_opts.addr = http_addr + } + if let Some(http_timeout) = cmd.http_timeout { + opts.http_opts.timeout = Duration::from_secs(http_timeout) + } Ok(opts) } diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs index 7dc801a59768..d9170066bc70 100644 --- a/src/cmd/src/metasrv.rs +++ b/src/cmd/src/metasrv.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::time::Duration; + use clap::Parser; use common_telemetry::{info, logging, warn}; use meta_srv::bootstrap::MetaSrvInstance; @@ -80,6 +82,10 @@ struct StartCommand { selector: Option<String>, #[clap(long)] use_memory_store: bool, + #[clap(long)] + http_addr: Option<String>, + #[clap(long)] + http_timeout: Option<u64>, } impl StartCommand { @@ -128,6 +134,13 @@ impl TryFrom<StartCommand> for MetaSrvOptions { opts.use_memory_store = true; } + if let Some(http_addr) = cmd.http_addr { + opts.http_opts.addr = http_addr; + } + if let Some(http_timeout) = cmd.http_timeout { + opts.http_opts.timeout = Duration::from_secs(http_timeout); + } + Ok(opts) } } @@ -150,6 +163,8 @@ mod tests { config_file: None, selector: Some("LoadBased".to_string()), use_memory_store: false, + http_addr: None, + http_timeout: None, }; let options: MetaSrvOptions = cmd.try_into().unwrap(); assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr); @@ -178,6 +193,8 @@ mod tests { selector: None, config_file: Some(file.path().to_str().unwrap().to_string()), use_memory_store: false, + http_addr: None, + http_timeout: None, }; let options: MetaSrvOptions = cmd.try_into().unwrap(); assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr); diff --git a/src/common/runtime/src/lib.rs b/src/common/runtime/src/lib.rs index 2a5b3a6b6516..06eb1139d28d 100644 --- a/src/common/runtime/src/lib.rs +++ b/src/common/runtime/src/lib.rs @@ -14,7 +14,7 @@ pub mod error; mod global; -pub mod metric; +mod metrics; mod repeated_task; pub mod runtime; diff --git a/src/common/runtime/src/metric.rs b/src/common/runtime/src/metrics.rs similarity index 100% rename from src/common/runtime/src/metric.rs rename to src/common/runtime/src/metrics.rs diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs index a16c4c4c3c53..2caabc327bb9 100644 --- a/src/common/runtime/src/runtime.rs +++ b/src/common/runtime/src/runtime.rs @@ -24,7 +24,7 @@ use tokio::sync::oneshot; pub use tokio::task::{JoinError, JoinHandle}; use crate::error::*; 
-use crate::metric::*; +use crate::metrics::*; /// A runtime to run future tasks #[derive(Clone, Debug)] diff --git a/src/common/telemetry/Cargo.toml b/src/common/telemetry/Cargo.toml index c139580dc519..29dc94d4b2a4 100644 --- a/src/common/telemetry/Cargo.toml +++ b/src/common/telemetry/Cargo.toml @@ -12,7 +12,7 @@ deadlock_detection = ["parking_lot"] backtrace = "0.3" common-error = { path = "../error" } console-subscriber = { version = "0.1", optional = true } -metrics = "0.20" +metrics = "0.20.1" metrics-exporter-prometheus = { version = "0.11", default-features = false } once_cell = "1.10" opentelemetry = { version = "0.17", default-features = false, features = [ diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index 17f199826557..482ec882c5d1 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -19,6 +19,7 @@ use common_base::readable_size::ReadableSize; use common_telemetry::info; use meta_client::MetaClientOptions; use serde::{Deserialize, Serialize}; +use servers::http::HttpOptions; use servers::Mode; use storage::config::EngineConfig as StorageEngineConfig; use storage::scheduler::SchedulerConfig; @@ -224,6 +225,7 @@ pub struct DatanodeOptions { pub rpc_runtime_size: usize, pub mysql_addr: String, pub mysql_runtime_size: usize, + pub http_opts: HttpOptions, pub meta_client_options: Option<MetaClientOptions>, pub wal: WalConfig, pub storage: StorageConfig, @@ -241,6 +243,7 @@ impl Default for DatanodeOptions { rpc_runtime_size: 8, mysql_addr: "127.0.0.1:4406".to_string(), mysql_runtime_size: 2, + http_opts: HttpOptions::default(), meta_client_options: None, wal: WalConfig::default(), storage: StorageConfig::default(), @@ -252,14 +255,17 @@ impl Default for DatanodeOptions { /// Datanode service. pub struct Datanode { opts: DatanodeOptions, - services: Services, + services: Option<Services>, instance: InstanceRef, } impl Datanode { pub async fn new(opts: DatanodeOptions) -> Result<Datanode> { let instance = Arc::new(Instance::new(&opts).await?); - let services = Services::try_new(instance.clone(), &opts).await?; + let services = match opts.mode { + Mode::Distributed => Some(Services::try_new(instance.clone(), &opts).await?), + Mode::Standalone => None, + }; Ok(Self { opts, services, @@ -280,7 +286,11 @@ impl Datanode { /// Start services of datanode. This method call will block until services are shutdown. 
pub async fn start_services(&mut self) -> Result<()> { - self.services.start(&self.opts).await + if let Some(service) = self.services.as_mut() { + service.start(&self.opts).await + } else { + Ok(()) + } } pub fn get_instance(&self) -> InstanceRef { @@ -292,7 +302,11 @@ impl Datanode { } async fn shutdown_services(&self) -> Result<()> { - self.services.shutdown().await + if let Some(service) = self.services.as_ref() { + service.shutdown().await + } else { + Ok(()) + } } pub async fn shutdown(&self) -> Result<()> { diff --git a/src/datanode/src/instance/script.rs b/src/datanode/src/instance/script.rs index fc7757a365de..d3eb5cb29f1e 100644 --- a/src/datanode/src/instance/script.rs +++ b/src/datanode/src/instance/script.rs @@ -20,7 +20,7 @@ use common_telemetry::timer; use servers::query_handler::ScriptHandler; use crate::instance::Instance; -use crate::metric; +use crate::metrics; #[async_trait] impl ScriptHandler for Instance { @@ -30,7 +30,7 @@ impl ScriptHandler for Instance { name: &str, script: &str, ) -> servers::error::Result<()> { - let _timer = timer!(metric::METRIC_HANDLE_SCRIPTS_ELAPSED); + let _timer = timer!(metrics::METRIC_HANDLE_SCRIPTS_ELAPSED); self.script_executor .insert_script(schema, name, script) .await @@ -42,7 +42,7 @@ impl ScriptHandler for Instance { name: &str, params: HashMap<String, String>, ) -> servers::error::Result<Output> { - let _timer = timer!(metric::METRIC_RUN_SCRIPT_ELAPSED); + let _timer = timer!(metrics::METRIC_RUN_SCRIPT_ELAPSED); self.script_executor .execute_script(schema, name, params) .await diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs index 7b615c86b45c..1cfa061064a4 100644 --- a/src/datanode/src/instance/sql.rs +++ b/src/datanode/src/instance/sql.rs @@ -37,7 +37,7 @@ use crate::error::{ TableIdProviderNotFoundSnafu, }; use crate::instance::Instance; -use crate::metric; +use crate::metrics; use crate::sql::{SqlHandler, SqlRequest}; impl Instance { @@ -190,7 +190,7 @@ impl Instance { promql: &PromQuery, query_ctx: QueryContextRef, ) -> Result<Output> { - let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED); + let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED); let stmt = QueryLanguageParser::parse_promql(promql).context(ExecuteSqlSnafu)?; @@ -294,7 +294,7 @@ impl StatementHandler for Instance { #[async_trait] impl PromHandler for Instance { async fn do_query(&self, query: &PromQuery) -> server_error::Result<Output> { - let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED); + let _timer = timer!(metrics::METRIC_HANDLE_PROMQL_ELAPSED); self.execute_promql(query, QueryContext::arc()) .await diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs index 6acd1fd94a58..862a8c84780a 100644 --- a/src/datanode/src/lib.rs +++ b/src/datanode/src/lib.rs @@ -19,7 +19,7 @@ pub mod datanode; pub mod error; mod heartbeat; pub mod instance; -pub mod metric; +pub mod metrics; mod mock; mod script; pub mod server; diff --git a/src/datanode/src/metric.rs b/src/datanode/src/metrics.rs similarity index 100% rename from src/datanode/src/metric.rs rename to src/datanode/src/metrics.rs diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs index 2417625b0ea3..533f9daa117c 100644 --- a/src/datanode/src/server.rs +++ b/src/datanode/src/server.rs @@ -18,9 +18,12 @@ use std::sync::Arc; use common_runtime::Builder as RuntimeBuilder; use servers::grpc::GrpcServer; +use servers::http::{HttpServer, HttpServerBuilder}; +use servers::metrics_handler::MetricsHandler; use 
servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor; use servers::server::Server; use snafu::ResultExt; +use tokio::select; use crate::datanode::DatanodeOptions; use crate::error::{ @@ -33,6 +36,7 @@ pub mod grpc; /// All rpc services. pub struct Services { grpc_server: GrpcServer, + http_server: HttpServer, } impl Services { @@ -51,6 +55,9 @@ impl Services { None, grpc_runtime, ), + http_server: HttpServerBuilder::new(opts.http_opts.clone()) + .with_metrics_handler(MetricsHandler) + .build(), }) } @@ -58,10 +65,15 @@ impl Services { let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu { addr: &opts.rpc_addr, })?; - self.grpc_server - .start(grpc_addr) - .await - .context(StartServerSnafu)?; + let http_addr = opts.http_opts.addr.parse().context(ParseAddrSnafu { + addr: &opts.http_opts.addr, + })?; + let grpc = self.grpc_server.start(grpc_addr); + let http = self.http_server.start(http_addr); + select!( + v = grpc => v.context(StartServerSnafu)?, + v = http => v.context(StartServerSnafu)?, + ); Ok(()) } @@ -69,6 +81,11 @@ impl Services { self.grpc_server .shutdown() .await - .context(ShutdownServerSnafu) + .context(ShutdownServerSnafu)?; + self.http_server + .shutdown() + .await + .context(ShutdownServerSnafu)?; + Ok(()) } } diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs index a37a5b9dcb40..5dcfd0a5c72c 100644 --- a/src/frontend/src/instance.rs +++ b/src/frontend/src/instance.rs @@ -40,7 +40,7 @@ use common_telemetry::timer; use datafusion::sql::sqlparser::ast::ObjectName; use datanode::instance::sql::table_idents_to_full_name; use datanode::instance::InstanceRef as DnInstanceRef; -use datanode::metric; +use datanode::metrics; use datatypes::schema::Schema; use distributed::DistInstance; use meta_client::client::{MetaClient, MetaClientBuilder}; @@ -532,7 +532,7 @@ impl SqlQueryHandler for Instance { type Error = Error; async fn do_query(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> { - let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED); + let _timer = timer!(metrics::METRIC_HANDLE_SQL_ELAPSED); let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>(); let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) { diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs index d4ab41dee88c..d7d7af1d1d2f 100644 --- a/src/frontend/src/server.rs +++ b/src/frontend/src/server.rs @@ -22,7 +22,7 @@ use common_telemetry::info; use servers::auth::UserProviderRef; use servers::error::Error::InternalIo; use servers::grpc::GrpcServer; -use servers::http::HttpServer; +use servers::http::HttpServerBuilder; use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef}; use servers::opentsdb::OpentsdbServer; use servers::postgres::PostgresServer; @@ -150,33 +150,33 @@ impl Services { if let Some(http_options) = &opts.http_options { let http_addr = parse_addr(&http_options.addr)?; - let mut http_server = HttpServer::new( - ServerSqlQueryHandlerAdaptor::arc(instance.clone()), - ServerGrpcQueryHandlerAdaptor::arc(instance.clone()), - http_options.clone(), - ); + let mut http_server_builder = HttpServerBuilder::new(http_options.clone()); + http_server_builder + .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(instance.clone())) + .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(instance.clone())); + if let Some(user_provider) = user_provider.clone() { - http_server.set_user_provider(user_provider); + http_server_builder.with_user_provider(user_provider); } if 
set_opentsdb_handler { - http_server.set_opentsdb_handler(instance.clone()); + http_server_builder.with_opentsdb_handler(instance.clone()); } if matches!( opts.influxdb_options, Some(InfluxdbOptions { enable: true }) ) { - http_server.set_influxdb_handler(instance.clone()); + http_server_builder.with_influxdb_handler(instance.clone()); } if matches!( opts.prometheus_options, Some(PrometheusOptions { enable: true }) ) { - http_server.set_prom_handler(instance.clone()); + http_server_builder.with_prom_handler(instance.clone()); } - http_server.set_script_handler(instance.clone()); - + http_server_builder.with_script_handler(instance.clone()); + let http_server = http_server_builder.build(); result.push((Box::new(http_server), http_addr)); } diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml index db451b8fae35..a9bcbb7d84f8 100644 --- a/src/meta-srv/Cargo.toml +++ b/src/meta-srv/Cargo.toml @@ -41,6 +41,7 @@ tokio-stream = { version = "0.1", features = ["net"] } tonic.workspace = true tower = "0.4" url = "2.3" +servers = { path = "../servers" } [dev-dependencies] tracing = "0.1" diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs index 77b89961f032..06d1167fec4d 100644 --- a/src/meta-srv/src/bootstrap.rs +++ b/src/meta-srv/src/bootstrap.rs @@ -20,8 +20,12 @@ use api::v1::meta::lock_server::LockServer; use api::v1::meta::router_server::RouterServer; use api::v1::meta::store_server::StoreServer; use etcd_client::Client; +use servers::http::{HttpServer, HttpServerBuilder}; +use servers::metrics_handler::MetricsHandler; +use servers::server::Server; use snafu::ResultExt; use tokio::net::TcpListener; +use tokio::select; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio_stream::wrappers::TcpListenerStream; use tonic::transport::server::Router; @@ -44,6 +48,8 @@ use crate::{error, Result}; pub struct MetaSrvInstance { meta_srv: MetaSrv, + http_srv: Arc<HttpServer>, + opts: MetaSrvOptions, signal_sender: Option<Sender<()>>, @@ -52,9 +58,14 @@ pub struct MetaSrvInstance { impl MetaSrvInstance { pub async fn new(opts: MetaSrvOptions) -> Result<MetaSrvInstance> { let meta_srv = build_meta_srv(&opts).await?; - + let http_srv = Arc::new( + HttpServerBuilder::new(opts.http_opts.clone()) + .with_metrics_handler(MetricsHandler) + .build(), + ); Ok(MetaSrvInstance { meta_srv, + http_srv, opts, signal_sender: None, }) @@ -67,12 +78,24 @@ impl MetaSrvInstance { self.signal_sender = Some(tx); - bootstrap_meta_srv_with_router( + let meta_srv = bootstrap_meta_srv_with_router( &self.opts.bind_addr, router(self.meta_srv.clone()), &mut rx, - ) - .await?; + ); + let addr = self + .opts + .http_opts + .addr + .parse() + .context(error::ParseAddrSnafu { + addr: &self.opts.http_opts.addr, + })?; + let http_srv = self.http_srv.start(addr); + select! 
{ + v = meta_srv => v?, + v = http_srv => v.map(|_| ()).context(error::StartMetricsExportSnafu)?, + } Ok(()) } @@ -86,7 +109,12 @@ impl MetaSrvInstance { } self.meta_srv.shutdown(); - + self.http_srv + .shutdown() + .await + .context(error::ShutdownServerSnafu { + server: self.http_srv.name(), + })?; Ok(()) } } diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs index d311e5bb8fc5..4ab353842d06 100644 --- a/src/meta-srv/src/error.rs +++ b/src/meta-srv/src/error.rs @@ -25,6 +25,13 @@ pub enum Error { #[snafu(display("Failed to send shutdown signal"))] SendShutdownSignal { source: SendError<()> }, + #[snafu(display("Failed to shutdown {} server, source: {}", server, source))] + ShutdownServer { + #[snafu(backtrace)] + source: servers::error::Error, + server: String, + }, + #[snafu(display("Error stream request next is None"))] StreamNone { backtrace: Backtrace }, @@ -55,7 +62,16 @@ pub enum Error { source: tonic::transport::Error, backtrace: Backtrace, }, - + #[snafu(display("Failed to start gRPC server, source: {}", source))] + StartMetricsExport { + #[snafu(backtrace)] + source: servers::error::Error, + }, + #[snafu(display("Failed to parse address {}, source: {}", addr, source))] + ParseAddr { + addr: String, + source: std::net::AddrParseError, + }, #[snafu(display("Empty table name"))] EmptyTableName { backtrace: Backtrace }, @@ -323,6 +339,7 @@ impl ErrorExt for Error { | Error::LockNotConfig { .. } | Error::ExceededRetryLimit { .. } | Error::SendShutdownSignal { .. } + | Error::ParseAddr { .. } | Error::StartGrpc { .. } => StatusCode::Internal, Error::EmptyKey { .. } | Error::MissingRequiredParameter { .. } @@ -348,6 +365,9 @@ impl ErrorExt for Error { Error::InvalidCatalogValue { source, .. } => source.status_code(), Error::MetaInternal { source } => source.status_code(), Error::RecoverProcedure { source } => source.status_code(), + Error::ShutdownServer { source, .. 
} | Error::StartMetricsExport { source } => { + source.status_code() + } } } } diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs index 951dd393d228..d6a095042d4b 100644 --- a/src/meta-srv/src/metasrv.rs +++ b/src/meta-srv/src/metasrv.rs @@ -21,6 +21,7 @@ use api::v1::meta::Peer; use common_procedure::ProcedureManagerRef; use common_telemetry::{error, info, warn}; use serde::{Deserialize, Serialize}; +use servers::http::HttpOptions; use snafu::ResultExt; use tokio::sync::broadcast::error::RecvError; @@ -44,6 +45,7 @@ pub struct MetaSrvOptions { pub datanode_lease_secs: i64, pub selector: SelectorType, pub use_memory_store: bool, + pub http_opts: HttpOptions, } impl Default for MetaSrvOptions { @@ -55,6 +57,7 @@ impl Default for MetaSrvOptions { datanode_lease_secs: 15, selector: SelectorType::default(), use_memory_store: false, + http_opts: HttpOptions::default(), } } } diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs index 30cde4e20fa0..8e147a26b74d 100644 --- a/src/query/src/datafusion.rs +++ b/src/query/src/datafusion.rs @@ -58,7 +58,7 @@ use crate::physical_planner::PhysicalPlanner; use crate::plan::LogicalPlan; use crate::planner::{DfLogicalPlanner, LogicalPlanner}; use crate::query_engine::{QueryEngineContext, QueryEngineState}; -use crate::{metric, QueryEngine}; +use crate::{metrics, QueryEngine}; pub struct DatafusionQueryEngine { state: Arc<QueryEngineState>, @@ -254,7 +254,7 @@ impl QueryEngine for DatafusionQueryEngine { impl LogicalOptimizer for DatafusionQueryEngine { fn optimize(&self, plan: &LogicalPlan) -> Result<LogicalPlan> { - let _timer = timer!(metric::METRIC_OPTIMIZE_LOGICAL_ELAPSED); + let _timer = timer!(metrics::METRIC_OPTIMIZE_LOGICAL_ELAPSED); match plan { LogicalPlan::DfPlan(df_plan) => { let optimized_plan = self @@ -280,7 +280,7 @@ impl PhysicalPlanner for DatafusionQueryEngine { ctx: &mut QueryEngineContext, logical_plan: &LogicalPlan, ) -> Result<Arc<dyn PhysicalPlan>> { - let _timer = timer!(metric::METRIC_CREATE_PHYSICAL_ELAPSED); + let _timer = timer!(metrics::METRIC_CREATE_PHYSICAL_ELAPSED); match logical_plan { LogicalPlan::DfPlan(df_plan) => { let state = ctx.state(); @@ -315,7 +315,7 @@ impl PhysicalOptimizer for DatafusionQueryEngine { ctx: &mut QueryEngineContext, plan: Arc<dyn PhysicalPlan>, ) -> Result<Arc<dyn PhysicalPlan>> { - let _timer = timer!(metric::METRIC_OPTIMIZE_PHYSICAL_ELAPSED); + let _timer = timer!(metrics::METRIC_OPTIMIZE_PHYSICAL_ELAPSED); let mut new_plan = plan .as_any() @@ -342,7 +342,7 @@ impl QueryExecutor for DatafusionQueryEngine { ctx: &QueryEngineContext, plan: &Arc<dyn PhysicalPlan>, ) -> Result<SendableRecordBatchStream> { - let _timer = timer!(metric::METRIC_EXEC_PLAN_ELAPSED); + let _timer = timer!(metrics::METRIC_EXEC_PLAN_ELAPSED); match plan.output_partitioning().partition_count() { 0 => Ok(Box::pin(EmptyRecordBatchStream::new(plan.schema()))), 1 => Ok(plan diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs index 6d7775614ffe..878b54dec35a 100644 --- a/src/query/src/lib.rs +++ b/src/query/src/lib.rs @@ -16,7 +16,7 @@ pub mod datafusion; pub mod error; pub mod executor; pub mod logical_optimizer; -mod metric; +mod metrics; mod optimizer; pub mod parser; pub mod physical_optimizer; diff --git a/src/query/src/metric.rs b/src/query/src/metrics.rs similarity index 97% rename from src/query/src/metric.rs rename to src/query/src/metrics.rs index ae306ff768c4..489e7fb62a56 100644 --- a/src/query/src/metric.rs +++ b/src/query/src/metrics.rs @@ -12,8 +12,6 @@ // See the License 
for the specific language governing permissions and // limitations under the License. -//! query engine metrics - pub static METRIC_PARSE_SQL_ELAPSED: &str = "query.parse_sql_elapsed"; pub static METRIC_PARSE_PROMQL_ELAPSED: &str = "query.parse_promql_elapsed"; pub static METRIC_OPTIMIZE_LOGICAL_ELAPSED: &str = "query.optimize_logicalplan_elapsed"; diff --git a/src/query/src/parser.rs b/src/query/src/parser.rs index e7559f03e7b7..799dd6997b64 100644 --- a/src/query/src/parser.rs +++ b/src/query/src/parser.rs @@ -28,7 +28,7 @@ use sql::statements::statement::Statement; use crate::error::{ MultipleStatementsSnafu, ParseFloatSnafu, ParseTimestampSnafu, QueryParseSnafu, Result, }; -use crate::metric::{METRIC_PARSE_PROMQL_ELAPSED, METRIC_PARSE_SQL_ELAPSED}; +use crate::metrics::{METRIC_PARSE_PROMQL_ELAPSED, METRIC_PARSE_SQL_ELAPSED}; const DEFAULT_LOOKBACK: u64 = 5 * 60; // 5m diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs index 8d5b05326233..aae858b59ad5 100644 --- a/src/servers/src/http.rs +++ b/src/servers/src/http.rs @@ -60,6 +60,7 @@ use self::influxdb::{influxdb_health, influxdb_ping, influxdb_write}; use crate::auth::UserProviderRef; use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu}; use crate::http::admin::flush; +use crate::metrics_handler::MetricsHandler; use crate::query_handler::grpc::ServerGrpcQueryHandlerRef; use crate::query_handler::sql::ServerSqlQueryHandlerRef; use crate::query_handler::{ @@ -99,9 +100,10 @@ pub const HTTP_API_PREFIX: &str = "/v1/"; // TODO(fys): This is a temporary workaround, it will be improved later pub static PUBLIC_APIS: [&str; 2] = ["/v1/influxdb/ping", "/v1/influxdb/health"]; +#[derive(Default)] pub struct HttpServer { - sql_handler: ServerSqlQueryHandlerRef, - grpc_handler: ServerGrpcQueryHandlerRef, + sql_handler: Option<ServerSqlQueryHandlerRef>, + grpc_handler: Option<ServerGrpcQueryHandlerRef>, options: HttpOptions, influxdb_handler: Option<InfluxdbLineProtocolHandlerRef>, opentsdb_handler: Option<OpentsdbProtocolHandlerRef>, @@ -109,9 +111,11 @@ pub struct HttpServer { script_handler: Option<ScriptHandlerRef>, shutdown_tx: Mutex<Option<Sender<()>>>, user_provider: Option<UserProviderRef>, + metrics_handler: Option<MetricsHandler>, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(default)] pub struct HttpOptions { pub addr: String, #[serde(with = "humantime_serde")] @@ -354,65 +358,74 @@ pub struct ApiState { pub script_handler: Option<ScriptHandlerRef>, } -impl HttpServer { - pub fn new( - sql_handler: ServerSqlQueryHandlerRef, - grpc_handler: ServerGrpcQueryHandlerRef, - options: HttpOptions, - ) -> Self { +#[derive(Default)] +pub struct HttpServerBuilder { + inner: HttpServer, +} + +impl HttpServerBuilder { + pub fn new(options: HttpOptions) -> Self { Self { - sql_handler, - grpc_handler, - options, - opentsdb_handler: None, - influxdb_handler: None, - prom_handler: None, - user_provider: None, - script_handler: None, - shutdown_tx: Mutex::new(None), + inner: HttpServer { + sql_handler: None, + grpc_handler: None, + options, + opentsdb_handler: None, + influxdb_handler: None, + prom_handler: None, + user_provider: None, + script_handler: None, + metrics_handler: None, + shutdown_tx: Mutex::new(None), + }, } } - pub fn set_opentsdb_handler(&mut self, handler: OpentsdbProtocolHandlerRef) { - debug_assert!( - self.opentsdb_handler.is_none(), - "OpenTSDB handler can be set only once!" 
- ); - self.opentsdb_handler.get_or_insert(handler); + pub fn with_sql_handler(&mut self, handler: ServerSqlQueryHandlerRef) -> &mut Self { + self.inner.sql_handler.get_or_insert(handler); + self } - pub fn set_script_handler(&mut self, handler: ScriptHandlerRef) { - debug_assert!( - self.script_handler.is_none(), - "Script handler can be set only once!" - ); - self.script_handler.get_or_insert(handler); + pub fn with_grpc_handler(&mut self, handler: ServerGrpcQueryHandlerRef) -> &mut Self { + self.inner.grpc_handler.get_or_insert(handler); + self } - pub fn set_influxdb_handler(&mut self, handler: InfluxdbLineProtocolHandlerRef) { - debug_assert!( - self.influxdb_handler.is_none(), - "Influxdb line protocol handler can be set only once!" - ); - self.influxdb_handler.get_or_insert(handler); + pub fn with_opentsdb_handler(&mut self, handler: OpentsdbProtocolHandlerRef) -> &mut Self { + self.inner.opentsdb_handler.get_or_insert(handler); + self } - pub fn set_prom_handler(&mut self, handler: PrometheusProtocolHandlerRef) { - debug_assert!( - self.prom_handler.is_none(), - "Prometheus protocol handler can be set only once!" - ); - self.prom_handler.get_or_insert(handler); + pub fn with_script_handler(&mut self, handler: ScriptHandlerRef) -> &mut Self { + self.inner.script_handler.get_or_insert(handler); + self } - pub fn set_user_provider(&mut self, user_provider: UserProviderRef) { - debug_assert!( - self.user_provider.is_none(), - "User provider can be set only once!" - ); - self.user_provider.get_or_insert(user_provider); + pub fn with_influxdb_handler(&mut self, handler: InfluxdbLineProtocolHandlerRef) -> &mut Self { + self.inner.influxdb_handler.get_or_insert(handler); + self } + pub fn with_prom_handler(&mut self, handler: PrometheusProtocolHandlerRef) -> &mut Self { + self.inner.prom_handler.get_or_insert(handler); + self + } + + pub fn with_user_provider(&mut self, user_provider: UserProviderRef) -> &mut Self { + self.inner.user_provider.get_or_insert(user_provider); + self + } + + pub fn with_metrics_handler(&mut self, handler: MetricsHandler) -> &mut Self { + self.inner.metrics_handler.get_or_insert(handler); + self + } + pub fn build(&mut self) -> HttpServer { + std::mem::take(self).inner + } +} + +impl HttpServer { pub fn make_app(&self) -> Router { let mut api = OpenApi { info: Info { @@ -428,19 +441,25 @@ impl HttpServer { ..OpenApi::default() }; - let sql_router = self - .route_sql(ApiState { - sql_handler: self.sql_handler.clone(), - script_handler: self.script_handler.clone(), - }) - .finish_api(&mut api) - .layer(Extension(api)); + let mut router = Router::new(); + + if let Some(sql_handler) = self.sql_handler.clone() { + let sql_router = self + .route_sql(ApiState { + sql_handler, + script_handler: self.script_handler.clone(), + }) + .finish_api(&mut api) + .layer(Extension(api)); + router = router.nest(&format!("/{HTTP_API_VERSION}"), sql_router); + } - let mut router = Router::new().nest(&format!("/{HTTP_API_VERSION}"), sql_router); - router = router.nest( - &format!("/{HTTP_API_VERSION}/admin"), - self.route_admin(self.grpc_handler.clone()), - ); + if let Some(grpc_handler) = self.grpc_handler.clone() { + router = router.nest( + &format!("/{HTTP_API_VERSION}/admin"), + self.route_admin(grpc_handler.clone()), + ); + } if let Some(opentsdb_handler) = self.opentsdb_handler.clone() { router = router.nest( @@ -472,7 +491,9 @@ impl HttpServer { ); } - router = router.route("/metrics", routing::get(handler::metrics)); + if let Some(metrics_handler) = self.metrics_handler { + 
router = router.nest("", self.route_metrics(metrics_handler)); + } router = router.route( "/health", @@ -498,6 +519,12 @@ impl HttpServer { ) } + fn route_metrics<S>(&self, metrics_handler: MetricsHandler) -> Router<S> { + Router::new() + .route("/metrics", routing::get(handler::metrics)) + .with_state(metrics_handler) + } + fn route_sql<S>(&self, api_state: ApiState) -> ApiRouter<S> { ApiRouter::new() .api_route( @@ -680,8 +707,10 @@ mod test { let instance = Arc::new(DummyInstance { _tx: tx }); let sql_instance = ServerSqlQueryHandlerAdaptor::arc(instance.clone()); let grpc_instance = ServerGrpcQueryHandlerAdaptor::arc(instance); - - let server = HttpServer::new(sql_instance, grpc_instance, HttpOptions::default()); + let server = HttpServerBuilder::new(HttpOptions::default()) + .with_sql_handler(sql_instance) + .with_grpc_handler(grpc_instance) + .build(); server.make_app().route( "/test/timeout", get(forever.layer( diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs index 8f7ede1c9183..606c06e23b5b 100644 --- a/src/servers/src/http/handler.rs +++ b/src/servers/src/http/handler.rs @@ -19,13 +19,13 @@ use aide::transform::TransformOperation; use axum::extract::{Json, Query, State}; use axum::{Extension, Form}; use common_error::status_code::StatusCode; -use common_telemetry::metric; use query::parser::PromQuery; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use session::context::UserInfo; use crate::http::{ApiState, JsonResponse}; +use crate::metrics_handler::MetricsHandler; #[derive(Debug, Default, Serialize, Deserialize, JsonSchema)] pub struct SqlQuery { @@ -114,12 +114,11 @@ pub(crate) fn sql_docs(op: TransformOperation) -> TransformOperation { /// Handler to export metrics #[axum_macros::debug_handler] -pub async fn metrics(Query(_params): Query<HashMap<String, String>>) -> String { - if let Some(handle) = metric::try_handle() { - handle.render() - } else { - "Prometheus handle not initialized.".to_owned() - } +pub async fn metrics( + State(state): State<MetricsHandler>, + Query(_params): Query<HashMap<String, String>>, +) -> String { + state.render() } #[derive(Debug, Serialize, Deserialize, JsonSchema)] diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs index 8260da6f2749..63411234645b 100644 --- a/src/servers/src/lib.rs +++ b/src/servers/src/lib.rs @@ -25,6 +25,7 @@ pub mod http; pub mod influxdb; pub mod interceptor; pub mod line_writer; +pub mod metrics_handler; pub mod mysql; pub mod opentsdb; pub mod postgres; diff --git a/src/servers/src/metrics_handler.rs b/src/servers/src/metrics_handler.rs new file mode 100644 index 000000000000..89970cf308c7 --- /dev/null +++ b/src/servers/src/metrics_handler.rs @@ -0,0 +1,30 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use common_telemetry::metric; + +/// a server that serves metrics +/// only start when datanode starts in distributed mode +#[derive(Copy, Clone)] +pub struct MetricsHandler; + +impl MetricsHandler { + pub fn render(&self) -> String { + if let Some(handle) = metric::try_handle() { + handle.render() + } else { + "Prometheus handle not initialized.".to_owned() + } + } +} diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs index 6ad18a9ac899..4d3698c150b8 100644 --- a/src/servers/tests/http/http_handler_test.rs +++ b/src/servers/tests/http/http_handler_test.rs @@ -20,6 +20,7 @@ use axum::Form; use common_telemetry::metric; use metrics::counter; use servers::http::{handler as http_handler, script as script_handler, ApiState, JsonOutput}; +use servers::metrics_handler::MetricsHandler; use session::context::UserInfo; use table::test_util::MemTable; @@ -146,8 +147,8 @@ async fn test_metrics() { metric::init_default_metrics_recorder(); counter!("test_metrics", 1); - - let text = http_handler::metrics(Query(HashMap::default())).await; + let stats = MetricsHandler; + let text = http_handler::metrics(axum::extract::State(stats), Query(HashMap::default())).await; assert!(text.contains("test_metrics counter")); } diff --git a/src/servers/tests/http/http_test.rs b/src/servers/tests/http/http_test.rs index c6398022b050..b6b3a9fa13b1 100644 --- a/src/servers/tests/http/http_test.rs +++ b/src/servers/tests/http/http_test.rs @@ -14,17 +14,20 @@ use axum::Router; use axum_test_helper::TestClient; -use servers::http::{HttpOptions, HttpServer}; +use servers::http::{HttpOptions, HttpServerBuilder}; use table::test_util::MemTable; use crate::{create_testing_grpc_query_handler, create_testing_sql_query_handler}; fn make_test_app() -> Router { - let server = HttpServer::new( - create_testing_sql_query_handler(MemTable::default_numbers_table()), - create_testing_grpc_query_handler(MemTable::default_numbers_table()), - HttpOptions::default(), - ); + let server = HttpServerBuilder::new(HttpOptions::default()) + .with_sql_handler(create_testing_sql_query_handler( + MemTable::default_numbers_table(), + )) + .with_grpc_handler(create_testing_grpc_query_handler( + MemTable::default_numbers_table(), + )) + .build(); server.make_app() } diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs index 086c6403c54b..da5d9d9a1f16 100644 --- a/src/servers/tests/http/influxdb_test.rs +++ b/src/servers/tests/http/influxdb_test.rs @@ -23,7 +23,7 @@ use common_query::Output; use datatypes::schema::Schema; use query::parser::PromQuery; use servers::error::{Error, Result}; -use servers::http::{HttpOptions, HttpServer}; +use servers::http::{HttpOptions, HttpServerBuilder}; use servers::influxdb::InfluxdbRequest; use servers::query_handler::grpc::GrpcQueryHandler; use servers::query_handler::sql::SqlQueryHandler; @@ -94,7 +94,9 @@ impl SqlQueryHandler for DummyInstance { fn make_test_app(tx: Arc<mpsc::Sender<(String, String)>>, db_name: Option<&str>) -> Router { let instance = Arc::new(DummyInstance { tx }); - let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default()); + let mut server_builder = HttpServerBuilder::new(HttpOptions::default()); + server_builder.with_sql_handler(instance.clone()); + server_builder.with_grpc_handler(instance.clone()); let mut user_provider = MockUserProvider::default(); if let Some(name) = db_name { user_provider.set_authorization_info(DatabaseAuthInfo { @@ -103,9 +105,10 @@ fn 
make_test_app(tx: Arc<mpsc::Sender<(String, String)>>, db_name: Option<&str>) username: "greptime", }) } - server.set_user_provider(Arc::new(user_provider)); + server_builder.with_user_provider(Arc::new(user_provider)); - server.set_influxdb_handler(instance); + server_builder.with_influxdb_handler(instance); + let server = server_builder.build(); server.make_app() } diff --git a/src/servers/tests/http/opentsdb_test.rs b/src/servers/tests/http/opentsdb_test.rs index 694751635eb5..8a0ed5998622 100644 --- a/src/servers/tests/http/opentsdb_test.rs +++ b/src/servers/tests/http/opentsdb_test.rs @@ -22,7 +22,7 @@ use common_query::Output; use datatypes::schema::Schema; use query::parser::PromQuery; use servers::error::{self, Result}; -use servers::http::{HttpOptions, HttpServer}; +use servers::http::{HttpOptions, HttpServerBuilder}; use servers::opentsdb::codec::DataPoint; use servers::query_handler::grpc::GrpcQueryHandler; use servers::query_handler::sql::SqlQueryHandler; @@ -92,8 +92,11 @@ impl SqlQueryHandler for DummyInstance { fn make_test_app(tx: mpsc::Sender<String>) -> Router { let instance = Arc::new(DummyInstance { tx }); - let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default()); - server.set_opentsdb_handler(instance); + let server = HttpServerBuilder::new(HttpOptions::default()) + .with_grpc_handler(instance.clone()) + .with_sql_handler(instance.clone()) + .with_opentsdb_handler(instance) + .build(); server.make_app() } diff --git a/src/servers/tests/http/prometheus_test.rs b/src/servers/tests/http/prometheus_test.rs index 173382b1e44b..69d4fb8046e9 100644 --- a/src/servers/tests/http/prometheus_test.rs +++ b/src/servers/tests/http/prometheus_test.rs @@ -26,7 +26,7 @@ use datatypes::schema::Schema; use prost::Message; use query::parser::PromQuery; use servers::error::{Error, Result}; -use servers::http::{HttpOptions, HttpServer}; +use servers::http::{HttpOptions, HttpServerBuilder}; use servers::prometheus; use servers::prometheus::{snappy_compress, Metrics}; use servers::query_handler::grpc::GrpcQueryHandler; @@ -117,8 +117,11 @@ impl SqlQueryHandler for DummyInstance { fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router { let instance = Arc::new(DummyInstance { tx }); - let mut server = HttpServer::new(instance.clone(), instance.clone(), HttpOptions::default()); - server.set_prom_handler(instance); + let server = HttpServerBuilder::new(HttpOptions::default()) + .with_grpc_handler(instance.clone()) + .with_sql_handler(instance.clone()) + .with_prom_handler(instance) + .build(); server.make_app() } diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs index 090311bd563f..69b0999b23c9 100644 --- a/tests-integration/src/test_util.rs +++ b/tests-integration/src/test_util.rs @@ -38,7 +38,8 @@ use object_store::ObjectStore; use once_cell::sync::OnceCell; use rand::Rng; use servers::grpc::GrpcServer; -use servers::http::{HttpOptions, HttpServer}; +use servers::http::{HttpOptions, HttpServerBuilder}; +use servers::metrics_handler::MetricsHandler; use servers::prom::PromServer; use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor; use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor; @@ -271,11 +272,13 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router ) .await .unwrap(); - let http_server = HttpServer::new( - ServerSqlQueryHandlerAdaptor::arc(Arc::new(build_frontend_instance(instance.clone()))), - ServerGrpcQueryHandlerAdaptor::arc(instance.clone()), - 
HttpOptions::default(), - ); + let http_server = HttpServerBuilder::new(HttpOptions::default()) + .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(Arc::new( + build_frontend_instance(instance.clone()), + ))) + .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(instance.clone())) + .with_metrics_handler(MetricsHandler) + .build(); (http_server.make_app(), guard) } @@ -295,12 +298,11 @@ pub async fn setup_test_http_app_with_frontend( .await .unwrap(); let frontend_ref = Arc::new(frontend); - let mut http_server = HttpServer::new( - ServerSqlQueryHandlerAdaptor::arc(frontend_ref.clone()), - ServerGrpcQueryHandlerAdaptor::arc(frontend_ref), - HttpOptions::default(), - ); - http_server.set_script_handler(instance.clone()); + let http_server = HttpServerBuilder::new(HttpOptions::default()) + .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(frontend_ref.clone())) + .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(frontend_ref)) + .with_script_handler(instance.clone()) + .build(); let app = http_server.make_app(); (app, guard) } diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs index 3a819b8d3ae7..e3d96c278689 100644 --- a/tests/runner/src/env.rs +++ b/tests/runner/src/env.rs @@ -161,10 +161,16 @@ impl Env { "datanode" | "standalone" => { args.push("-c".to_string()); args.push(Self::generate_config_file(subcommand, db_ctx)); + args.push("--http-addr=0.0.0.0:5001".to_string()); + } + "frontend" => { + args.push("--metasrv-addr=0.0.0.0:3002".to_string()); + args.push("--http-addr=0.0.0.0:5000".to_string()); + } + "metasrv" => { + args.push("--use-memory-store".to_string()); + args.push("--http-addr=0.0.0.0:5002".to_string()); } - "frontend" => args.push("--metasrv-addr=0.0.0.0:3002".to_string()), - "metasrv" => args.push("--use-memory-store".to_string()), - _ => panic!("Unexpected subcommand: {subcommand}"), }
chore
add http metrics server in datanode node when greptime start in distributed mode (#1256)
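The diff in this record replaces the old `HttpServer::new(...)` constructor and its `set_*` setters with an `HttpServerBuilder`, then reuses that builder so the datanode and metasrv can expose a `/metrics` endpoint in distributed mode. Below is a minimal, hedged sketch of how a metrics-only HTTP server is assembled and driven with the builder calls shown in the diff (`HttpServerBuilder`, `HttpOptions`, `MetricsHandler`, and the `Server` trait); the bind address is arbitrary for illustration and error handling is reduced to `?`, so treat it as a sketch rather than the exact datanode code.

```rust
use std::net::SocketAddr;

use servers::http::{HttpOptions, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::server::Server;

/// Build an HTTP server that only registers the metrics handler, mirroring
/// what the datanode and metasrv do in the diff above, then start it.
async fn run_metrics_server() -> servers::error::Result<()> {
    // Only the metrics handler is set, so `make_app` skips the SQL, gRPC-admin,
    // and protocol routes that are now guarded by `Option` in the builder.
    let http_server = HttpServerBuilder::new(HttpOptions::default())
        .with_metrics_handler(MetricsHandler)
        .build();

    // The address here is arbitrary for the sketch; the real services parse it
    // from `http_opts.addr` / the new `--http-addr` flag.
    let addr: SocketAddr = "127.0.0.1:5001".parse().expect("valid socket address");

    // Bind and start serving; the datanode/metasrv `select!` this future
    // together with their gRPC server in the diff above.
    http_server.start(addr).await?;

    // ... later, during shutdown:
    http_server.shutdown().await
}
```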
09fff24ac4149a22f530a0e25df60c1aab0f5f73
2024-07-05 20:16:44
discord9
feat: make flow distributed work&tests (#4256)
false
diff --git a/Cargo.lock b/Cargo.lock index c1613f97f3eb..64c95c3cc051 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1760,6 +1760,7 @@ dependencies = [ "tikv-jemallocator", "tokio", "toml 0.8.14", + "tonic 0.11.0", "tracing-appender", ] @@ -3756,6 +3757,7 @@ dependencies = [ "async-trait", "bytes", "catalog", + "client", "common-base", "common-catalog", "common-config", diff --git a/config/config-docs-template.md b/config/config-docs-template.md index b70c20184d31..81020133f87b 100644 --- a/config/config-docs-template.md +++ b/config/config-docs-template.md @@ -1,10 +1,12 @@ # Configurations -- [Standalone Mode](#standalone-mode) -- [Distributed Mode](#distributed-mode) +- [Configurations](#configurations) + - [Standalone Mode](#standalone-mode) + - [Distributed Mode](#distributed-mode) - [Frontend](#frontend) - [Metasrv](#metasrv) - [Datanode](#datanode) + - [Flownode](#flownode) ## Standalone Mode @@ -23,3 +25,7 @@ ### Datanode {{ toml2docs "./datanode.example.toml" }} + +### Flownode + +{{ toml2docs "./flownode.example.toml"}} \ No newline at end of file diff --git a/config/config.md b/config/config.md index 32f34304c6c0..132fc7aff8dc 100644 --- a/config/config.md +++ b/config/config.md @@ -1,10 +1,12 @@ # Configurations -- [Standalone Mode](#standalone-mode) -- [Distributed Mode](#distributed-mode) +- [Configurations](#configurations) + - [Standalone Mode](#standalone-mode) + - [Distributed Mode](#distributed-mode) - [Frontend](#frontend) - [Metasrv](#metasrv) - [Datanode](#datanode) + - [Flownode](#flownode) ## Standalone Mode @@ -434,3 +436,41 @@ | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. | | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. | | `tracing.tokio_console_addr` | String | `None` | The tokio console address. | + + +### Flownode + +| Key | Type | Default | Descriptions | +| --- | -----| ------- | ----------- | +| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. | +| `node_id` | Integer | `None` | The flownode identifier and should be unique in the cluster. | +| `frontend_addr` | String | `http://127.0.0.1:4001` | Frontend grpc address. Used by flownode to write result back to frontend. | +| `grpc` | -- | -- | The gRPC server options. | +| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. | +| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host | +| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. | +| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. | +| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. | +| `meta_client` | -- | -- | The metasrv client options. | +| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. | +| `meta_client.timeout` | String | `3s` | Operation timeout. | +| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. | +| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. | +| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. | +| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. | +| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. 
| +| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. | +| `meta_client.metadata_cache_tti` | String | `5m` | -- | +| `heartbeat` | -- | -- | The heartbeat options. | +| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. | +| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | +| `logging` | -- | -- | The logging options. | +| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. | +| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. | +| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | +| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. | +| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | +| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 | +| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | +| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. | +| `tracing.tokio_console_addr` | String | `None` | The tokio console address. | diff --git a/config/flownode.example.toml b/config/flownode.example.toml new file mode 100644 index 000000000000..69bcd94a2296 --- /dev/null +++ b/config/flownode.example.toml @@ -0,0 +1,93 @@ +## The running mode of the flownode. It can be `standalone` or `distributed`. +mode = "distributed" + +## The flownode identifier and should be unique in the cluster. +## +toml2docs:none-default +node_id = 14 + +## Frontend grpc address. Used by flownode to write result back to frontend. +frontend_addr = "http://127.0.0.1:4001" + +## The gRPC server options. +[grpc] +## The address to bind the gRPC server. +addr = "127.0.0.1:6800" +## The hostname advertised to the metasrv, +## and used for connections from outside the host +hostname = "127.0.0.1" +## The number of server worker threads. +runtime_size = 2 +## The maximum receive message size for gRPC server. +max_recv_message_size = "512MB" +## The maximum send message size for gRPC server. +max_send_message_size = "512MB" + + +## The metasrv client options. +[meta_client] +## The addresses of the metasrv. +metasrv_addrs = ["127.0.0.1:3002"] + +## Operation timeout. +timeout = "3s" + +## Heartbeat timeout. +heartbeat_timeout = "500ms" + +## DDL timeout. +ddl_timeout = "10s" + +## Connect server timeout. +connect_timeout = "1s" + +## `TCP_NODELAY` option for accepted connections. +tcp_nodelay = true + +## The configuration about the cache of the metadata. +metadata_cache_max_capacity = 100000 + +## TTL of the metadata cache. +metadata_cache_ttl = "10m" + +# TTI of the metadata cache. +metadata_cache_tti = "5m" + +## The heartbeat options. +[heartbeat] +## Interval for sending heartbeat messages to the metasrv. +interval = "3s" + +## Interval for retrying to send heartbeat messages to the metasrv. +retry_interval = "3s" + +## The logging options. +[logging] +## The directory to store the log files. +dir = "/tmp/greptimedb/logs" + +## The log level. Can be `info`/`debug`/`warn`/`error`. +## +toml2docs:none-default +level = "info" + +## Enable OTLP tracing. +enable_otlp_tracing = false + +## The OTLP tracing endpoint. 
+## +toml2docs:none-default +otlp_endpoint = "" + +## Whether to append logs to stdout. +append_stdout = true + +## The percentage of tracing will be sampled and exported. +## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. +## ratio > 1 are treated as 1. Fractions < 0 are treated as 0 +[logging.tracing_sample_ratio] +default_ratio = 1.0 + +## The tracing options. Only effect when compiled with `tokio-console` feature. +[tracing] +## The tokio console address. +## +toml2docs:none-default +tokio_console_addr = "127.0.0.1" + diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 9bc3d77564ad..6a0ef4a2f23c 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -74,6 +74,7 @@ substrait.workspace = true table.workspace = true tokio.workspace = true toml.workspace = true +tonic.workspace = true tracing-appender = "0.2" [target.'cfg(not(windows))'.dependencies] diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs index 1bc3d0b8eb81..2e4e252cc4d8 100644 --- a/src/cmd/src/error.rs +++ b/src/cmd/src/error.rs @@ -346,6 +346,15 @@ pub enum Error { location: Location, source: meta_client::error::Error, }, + + #[snafu(display("Tonic transport error: {error:?} with msg: {msg:?}"))] + TonicTransport { + #[snafu(implicit)] + location: Location, + #[snafu(source)] + error: tonic::transport::Error, + msg: Option<String>, + }, } pub type Result<T> = std::result::Result<T, Error>; @@ -405,6 +414,7 @@ impl ErrorExt for Error { source.status_code() } Error::MetaClientInit { source, .. } => source.status_code(), + Error::TonicTransport { .. } => StatusCode::Internal, } } diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs index c0de61564a12..8ad90b2f7743 100644 --- a/src/cmd/src/flownode.rs +++ b/src/cmd/src/flownode.rs @@ -31,11 +31,12 @@ use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHa use meta_client::{MetaClientOptions, MetaClientType}; use servers::Mode; use snafu::{OptionExt, ResultExt}; +use tonic::transport::Endpoint; use tracing_appender::non_blocking::WorkerGuard; use crate::error::{ BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu, - MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu, + MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu, TonicTransportSnafu, }; use crate::options::{GlobalOptions, GreptimeOptions}; use crate::{log_versions, App}; @@ -119,16 +120,26 @@ impl SubCommand { #[derive(Debug, Parser, Default)] struct StartCommand { + /// Flownode's id #[clap(long)] node_id: Option<u64>, + /// Bind address for the gRPC server. #[clap(long)] rpc_addr: Option<String>, + /// Hostname for the gRPC server. #[clap(long)] rpc_hostname: Option<String>, + /// Metasrv address list; #[clap(long, value_delimiter = ',', num_args = 1..)] metasrv_addrs: Option<Vec<String>>, + /// The gprc address of the frontend server used for writing results back to the database. + /// Need prefix i.e. 
"http://" + #[clap(long)] + frontend_addr: Option<String>, + /// The configuration file for flownode #[clap(short, long)] config_file: Option<String>, + /// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`; #[clap(long, default_value = "GREPTIMEDB_FLOWNODE")] env_prefix: String, } @@ -175,6 +186,10 @@ impl StartCommand { opts.grpc.hostname.clone_from(hostname); } + if let Some(fe_addr) = &self.frontend_addr { + opts.frontend_addr = Some(fe_addr.clone()); + } + if let Some(node_id) = self.node_id { opts.node_id = Some(node_id); } @@ -213,10 +228,13 @@ impl StartCommand { let opts = opts.component; - let cluster_id = opts.cluster_id.context(MissingConfigSnafu { - msg: "'cluster_id'", + let frontend_addr = opts.frontend_addr.clone().context(MissingConfigSnafu { + msg: "'frontend_addr'", })?; + // TODO(discord9): make it not optionale after cluster id is required + let cluster_id = opts.cluster_id.unwrap_or(0); + let member_id = opts .node_id .context(MissingConfigSnafu { msg: "'node_id'" })?; @@ -298,6 +316,22 @@ impl StartCommand { let flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?; + // set up the lazy connection to the frontend server + // TODO(discord9): consider move this to start() or pre_start()? + let endpoint = + Endpoint::from_shared(frontend_addr.clone()).context(TonicTransportSnafu { + msg: Some(format!("Fail to create from addr={}", frontend_addr)), + })?; + let chnl = endpoint.connect().await.context(TonicTransportSnafu { + msg: Some("Fail to connect to frontend".to_string()), + })?; + info!("Connected to frontend server: {:?}", frontend_addr); + let client = flow::FrontendClient::new(chnl); + flownode + .flow_worker_manager() + .set_frontend_invoker(Box::new(client)) + .await; + Ok(Instance::new(flownode, guard)) } } diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml index 395f9cf07510..b7aefb62040d 100644 --- a/src/flow/Cargo.toml +++ b/src/flow/Cargo.toml @@ -14,6 +14,7 @@ async-recursion = "1.0" async-trait.workspace = true bytes.workspace = true catalog.workspace = true +client.workspace = true common-base.workspace = true common-config.workspace = true common-decimal.workspace = true diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs index c9d1ca570a76..00b071f474aa 100644 --- a/src/flow/src/adapter.rs +++ b/src/flow/src/adapter.rs @@ -84,6 +84,7 @@ pub struct FlownodeOptions { pub cluster_id: Option<u64>, pub node_id: Option<u64>, pub grpc: GrpcOptions, + pub frontend_addr: Option<String>, pub meta_client: Option<MetaClientOptions>, pub logging: LoggingOptions, pub tracing: TracingOptions, @@ -97,6 +98,7 @@ impl Default for FlownodeOptions { cluster_id: None, node_id: None, grpc: GrpcOptions::default().with_addr("127.0.0.1:3004"), + frontend_addr: None, meta_client: None, logging: LoggingOptions::default(), tracing: TracingOptions::default(), @@ -133,10 +135,7 @@ pub struct FlowWorkerManager { /// Building FlownodeManager impl FlowWorkerManager { /// set frontend invoker - pub async fn set_frontend_invoker( - self: &Arc<Self>, - frontend: Box<dyn FrontendInvoker + Send + Sync>, - ) { + pub async fn set_frontend_invoker(&self, frontend: Box<dyn FrontendInvoker + Send + Sync>) { *self.frontend_invoker.write().await = Some(frontend); } diff --git a/src/flow/src/fe_client.rs b/src/flow/src/fe_client.rs new file mode 100644 index 000000000000..2eac6853e815 --- /dev/null +++ b/src/flow/src/fe_client.rs @@ -0,0 +1,109 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Frontend Client for flownode, used for writing result back to database + +use api::v1::greptime_database_client::GreptimeDatabaseClient; +use api::v1::greptime_request::Request; +use api::v1::{ + GreptimeRequest, GreptimeResponse, RequestHeader, RowDeleteRequests, RowInsertRequests, +}; +use common_error::ext::BoxedError; +use common_frontend::handler::FrontendInvoker; +use common_query::Output; +use common_telemetry::tracing_context::{TracingContext, W3cTrace}; +use session::context::{QueryContext, QueryContextRef}; +use snafu::IntoError; +use tokio::sync::Mutex; + +use crate::{Error, Result}; + +/// Frontend client for writing result back to database +pub struct FrontendClient { + client: GreptimeDatabaseClient<tonic::transport::Channel>, +} + +impl FrontendClient { + pub fn new(channel: tonic::transport::Channel) -> Self { + Self { + client: GreptimeDatabaseClient::new(channel), + } + } +} + +fn to_rpc_request(request: Request, ctx: &QueryContext) -> GreptimeRequest { + let header = RequestHeader { + catalog: ctx.current_catalog().to_string(), + schema: ctx.current_schema().to_string(), + authorization: None, + // dbname is empty so that header use catalog+schema to determine the database + // see `create_query_context` in `greptime_handler.rs` + dbname: "".to_string(), + timezone: ctx.timezone().to_string(), + tracing_context: TracingContext::from_current_span().to_w3c(), + }; + GreptimeRequest { + header: Some(header), + request: Some(request), + } +} + +fn from_rpc_error(e: tonic::Status) -> common_frontend::error::Error { + common_frontend::error::ExternalSnafu {} + .into_error(BoxedError::new(client::error::Error::from(e))) +} + +fn resp_to_output(resp: GreptimeResponse) -> Output { + let affect_rows = resp + .response + .map(|r| match r { + api::v1::greptime_response::Response::AffectedRows(r) => r.value, + }) + .unwrap_or(0); + + Output::new_with_affected_rows(affect_rows as usize) +} + +#[async_trait::async_trait] +impl FrontendInvoker for FrontendClient { + async fn row_inserts( + &self, + requests: RowInsertRequests, + ctx: QueryContextRef, + ) -> common_frontend::error::Result<Output> { + let req = to_rpc_request(Request::RowInserts(requests), &ctx); + let resp = self + .client + .clone() + .handle(req) + .await + .map_err(from_rpc_error)?; + Ok(resp_to_output(resp.into_inner())) + } + + async fn row_deletes( + &self, + requests: RowDeleteRequests, + ctx: QueryContextRef, + ) -> common_frontend::error::Result<Output> { + let req = to_rpc_request(Request::RowDeletes(requests), &ctx); + let resp = self + .client + .clone() + .handle(req) + .await + .map_err(from_rpc_error)?; + Ok(resp_to_output(resp.into_inner())) + } +} diff --git a/src/flow/src/lib.rs b/src/flow/src/lib.rs index 636a722b04c2..23a401d3931b 100644 --- a/src/flow/src/lib.rs +++ b/src/flow/src/lib.rs @@ -27,6 +27,7 @@ mod adapter; mod compute; mod error; mod expr; +mod fe_client; pub mod heartbeat; mod plan; mod repr; @@ -36,4 +37,5 @@ mod utils; pub use adapter::{FlowWorkerManager, FlowWorkerManagerRef, 
FlownodeOptions}; pub use error::{Error, Result}; +pub use fe_client::FrontendClient; pub use server::{FlownodeBuilder, FlownodeInstance, FlownodeServer}; diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs index 166f6b5f5f63..fb0f679c1f56 100644 --- a/src/flow/src/server.rs +++ b/src/flow/src/server.rs @@ -37,6 +37,7 @@ use servers::server::Server; use snafu::{ensure, ResultExt}; use tokio::net::TcpListener; use tokio::sync::{oneshot, Mutex}; +use tonic::codec::CompressionEncoding; use tonic::transport::server::TcpIncoming; use tonic::{Request, Response, Status}; @@ -120,6 +121,10 @@ impl FlownodeServer { impl FlownodeServer { pub fn create_flow_service(&self) -> flow_server::FlowServer<impl flow_server::Flow> { flow_server::FlowServer::new(self.flow_service.clone()) + .accept_compressed(CompressionEncoding::Gzip) + .send_compressed(CompressionEncoding::Gzip) + .accept_compressed(CompressionEncoding::Zstd) + .send_compressed(CompressionEncoding::Zstd) } } diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs index 5c38156a71f0..15b4b24fc074 100644 --- a/src/meta-srv/src/error.rs +++ b/src/meta-srv/src/error.rs @@ -26,6 +26,7 @@ use table::metadata::TableId; use tokio::sync::mpsc::error::SendError; use tonic::codegen::http; +use crate::metasrv::SelectTarget; use crate::pubsub::Message; #[derive(Snafu)] @@ -175,15 +176,17 @@ pub enum Error { }, #[snafu(display( - "Failed to request Datanode, required: {}, but only {} available", + "Failed to request {}, required: {}, but only {} available", + select_target, required, available ))] - NoEnoughAvailableDatanode { + NoEnoughAvailableNode { #[snafu(implicit)] location: Location, required: usize, available: usize, + select_target: SelectTarget, }, #[snafu(display("Failed to request Datanode {}", peer))] @@ -895,7 +898,7 @@ impl ErrorExt for Error { | Error::RetryLaterWithSource { .. } | Error::StartGrpc { .. } | Error::UpdateTableMetadata { .. } - | Error::NoEnoughAvailableDatanode { .. } + | Error::NoEnoughAvailableNode { .. } | Error::PublishMessage { .. } | Error::Join { .. } | Error::WeightArray { .. 
} diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs index 14df81f028be..1389e6896fd2 100644 --- a/src/meta-srv/src/handler/collect_stats_handler.rs +++ b/src/meta-srv/src/handler/collect_stats_handler.rs @@ -28,7 +28,7 @@ use crate::metasrv::Context; const MAX_CACHED_STATS_PER_KEY: usize = 10; -#[derive(Default)] +#[derive(Debug, Default)] struct EpochStats { stats: Vec<Stat>, epoch: Option<u64>, diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs index 9e1727f5cf44..9105ca048959 100644 --- a/src/meta-srv/src/metasrv.rs +++ b/src/meta-srv/src/metasrv.rs @@ -14,6 +14,7 @@ pub mod builder; +use std::fmt::Display; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; @@ -248,6 +249,15 @@ pub enum SelectTarget { Flownode, } +impl Display for SelectTarget { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SelectTarget::Datanode => write!(f, "datanode"), + SelectTarget::Flownode => write!(f, "flownode"), + } + } +} + #[derive(Clone)] pub struct SelectorContext { pub server_addr: String, diff --git a/src/meta-srv/src/selector/common.rs b/src/meta-srv/src/selector/common.rs index cccdcd391282..f1d127eea011 100644 --- a/src/meta-srv/src/selector/common.rs +++ b/src/meta-srv/src/selector/common.rs @@ -20,6 +20,7 @@ use snafu::ensure; use super::weighted_choose::{WeightedChoose, WeightedItem}; use crate::error; use crate::error::Result; +use crate::metasrv::SelectTarget; use crate::selector::SelectorOptions; /// According to the `opts`, choose peers from the `weight_array` through `weighted_choose`. @@ -34,9 +35,10 @@ where let min_required_items = opts.min_required_items; ensure!( !weight_array.is_empty(), - error::NoEnoughAvailableDatanodeSnafu { + error::NoEnoughAvailableNodeSnafu { required: min_required_items, available: 0_usize, + select_target: SelectTarget::Datanode } ); @@ -52,9 +54,10 @@ where // or equal to min_required_items, otherwise it may cause an infinite loop. 
ensure!( weight_array_len >= min_required_items, - error::NoEnoughAvailableDatanodeSnafu { + error::NoEnoughAvailableNodeSnafu { required: min_required_items, available: weight_array_len, + select_target: SelectTarget::Datanode } ); diff --git a/src/meta-srv/src/selector/round_robin.rs b/src/meta-srv/src/selector/round_robin.rs index dba8b556a399..b50823cb02ee 100644 --- a/src/meta-srv/src/selector/round_robin.rs +++ b/src/meta-srv/src/selector/round_robin.rs @@ -17,7 +17,7 @@ use std::sync::atomic::AtomicUsize; use common_meta::peer::Peer; use snafu::ensure; -use crate::error::{NoEnoughAvailableDatanodeSnafu, Result}; +use crate::error::{NoEnoughAvailableNodeSnafu, Result}; use crate::lease; use crate::metasrv::{SelectTarget, SelectorContext}; use crate::selector::{Namespace, Selector, SelectorOptions}; @@ -86,9 +86,10 @@ impl RoundRobinSelector { ensure!( !peers.is_empty(), - NoEnoughAvailableDatanodeSnafu { + NoEnoughAvailableNodeSnafu { required: min_required_items, available: 0usize, + select_target: self.select_target } ); diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs index 03cbff663a90..a8f4b4406967 100644 --- a/src/meta-srv/src/table_meta_alloc.rs +++ b/src/meta-srv/src/table_meta_alloc.rs @@ -22,7 +22,7 @@ use snafu::{ensure, ResultExt}; use store_api::storage::MAX_REGION_SEQ; use crate::error::{self, Result, TooManyPartitionsSnafu}; -use crate::metasrv::{SelectorContext, SelectorRef}; +use crate::metasrv::{SelectTarget, SelectorContext, SelectorRef}; use crate::selector::SelectorOptions; pub struct MetasrvPeerAllocator { @@ -64,9 +64,10 @@ impl MetasrvPeerAllocator { ensure!( peers.len() >= regions, - error::NoEnoughAvailableDatanodeSnafu { + error::NoEnoughAvailableNodeSnafu { required: regions, available: peers.len(), + select_target: SelectTarget::Datanode } ); diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs index 9f15519c4425..37d4d4440ca1 100644 --- a/src/operator/src/insert.rs +++ b/src/operator/src/insert.rs @@ -334,7 +334,7 @@ impl Inserter { // already know this is not source table Some(None) => continue, _ => { - // TODO(discord9): determine where to store the flow node address in distributed mode + // TODO(discord9): query metasrv for actual peer address let peers = self .table_flownode_set_cache .get(table_id) diff --git a/tests/cases/distributed/information_schema/cluster_info.result b/tests/cases/distributed/information_schema/cluster_info.result index 0cb23706194b..db05b043ac8f 100644 --- a/tests/cases/distributed/information_schema/cluster_info.result +++ b/tests/cases/distributed/information_schema/cluster_info.result @@ -25,7 +25,7 @@ DESC TABLE CLUSTER_INFO; -- SQLNESS REPLACE [\s\-]+ SELECT * FROM CLUSTER_INFO ORDER BY peer_type; -+++++++++|peer_id|peer_type|peer_addr|node_version|git_commit|start_time|uptime|active_time|+++++++++|1|DATANODE|127.0.0.1:4101|Version|Hash|Start_time|Duration|Duration||2|DATANODE|127.0.0.1:4102|Version|Hash|Start_time|Duration|Duration||3|DATANODE|127.0.0.1:4103|Version|Hash|Start_time|Duration|Duration||1|FRONTEND|127.0.0.1:4001|Version|Hash|Start_time|Duration|Duration||1|METASRV|127.0.0.1:3002|Version|Hash|Start_time|Duration||+++++++++ 
++++++++++|peer_id|peer_type|peer_addr|node_version|git_commit|start_time|uptime|active_time|+++++++++|1|DATANODE|127.0.0.1:4101|Version|Hash|Start_time|Duration|Duration||2|DATANODE|127.0.0.1:4102|Version|Hash|Start_time|Duration|Duration||3|DATANODE|127.0.0.1:4103|Version|Hash|Start_time|Duration|Duration||0|FLOWNODE|127.0.0.1:6800|Version|Hash|Start_time|Duration|Duration||1|FRONTEND|127.0.0.1:4001|Version|Hash|Start_time|Duration|Duration||1|METASRV|127.0.0.1:3002|Version|Hash|Start_time|Duration||+++++++++ -- SQLNESS REPLACE version node_version -- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version @@ -55,7 +55,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'FRONTEND' ORDER BY peer_type; -- SQLNESS REPLACE [\s\-]+ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE != 'FRONTEND' ORDER BY peer_type; -+++++++++|peer_id|peer_type|peer_addr|node_version|git_commit|start_time|uptime|active_time|+++++++++|1|DATANODE|127.0.0.1:4101|Version|Hash|Start_time|Duration|Duration||2|DATANODE|127.0.0.1:4102|Version|Hash|Start_time|Duration|Duration||3|DATANODE|127.0.0.1:4103|Version|Hash|Start_time|Duration|Duration||1|METASRV|127.0.0.1:3002|Version|Hash|Start_time|Duration||+++++++++ ++++++++++|peer_id|peer_type|peer_addr|node_version|git_commit|start_time|uptime|active_time|+++++++++|1|DATANODE|127.0.0.1:4101|Version|Hash|Start_time|Duration|Duration||2|DATANODE|127.0.0.1:4102|Version|Hash|Start_time|Duration|Duration||3|DATANODE|127.0.0.1:4103|Version|Hash|Start_time|Duration|Duration||0|FLOWNODE|127.0.0.1:6800|Version|Hash|Start_time|Duration|Duration||1|METASRV|127.0.0.1:3002|Version|Hash|Start_time|Duration||+++++++++ -- SQLNESS REPLACE version node_version -- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version diff --git a/tests/cases/standalone/flow/basic.result b/tests/cases/standalone/common/flow/basic.result similarity index 100% rename from tests/cases/standalone/flow/basic.result rename to tests/cases/standalone/common/flow/basic.result diff --git a/tests/cases/standalone/flow/basic.sql b/tests/cases/standalone/common/flow/basic.sql similarity index 100% rename from tests/cases/standalone/flow/basic.sql rename to tests/cases/standalone/common/flow/basic.sql diff --git a/tests/cases/standalone/flow/df_func.result b/tests/cases/standalone/common/flow/df_func.result similarity index 100% rename from tests/cases/standalone/flow/df_func.result rename to tests/cases/standalone/common/flow/df_func.result diff --git a/tests/cases/standalone/flow/df_func.sql b/tests/cases/standalone/common/flow/df_func.sql similarity index 100% rename from tests/cases/standalone/flow/df_func.sql rename to tests/cases/standalone/common/flow/df_func.sql diff --git a/tests/cases/standalone/show_create_flow.result b/tests/cases/standalone/common/flow/show_create_flow.result similarity index 100% rename from tests/cases/standalone/show_create_flow.result rename to tests/cases/standalone/common/flow/show_create_flow.result diff --git a/tests/cases/standalone/show_create_flow.sql b/tests/cases/standalone/common/flow/show_create_flow.sql similarity index 100% rename from tests/cases/standalone/show_create_flow.sql rename to tests/cases/standalone/common/flow/show_create_flow.sql diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs index 60f6d132f247..97791c592301 100644 --- a/tests/runner/src/env.rs +++ b/tests/runner/src/env.rs @@ -115,6 +115,7 @@ impl Env { server_processes: Some(Arc::new(Mutex::new(vec![server_process]))), metasrv_process: None, frontend_process: None, + flownode_process: None, client: TokioMutex::new(db), ctx: 
db_ctx, is_standalone: true, @@ -141,6 +142,8 @@ impl Env { let frontend = self.start_server("frontend", &db_ctx, true).await; + let flownode = self.start_server("flownode", &db_ctx, true).await; + let client = Client::with_urls(vec![SERVER_ADDR]); let db = DB::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client); @@ -150,6 +153,7 @@ impl Env { ]))), metasrv_process: Some(meta_server), frontend_process: Some(frontend), + flownode_process: Some(flownode), client: TokioMutex::new(db), ctx: db_ctx, is_standalone: false, @@ -166,6 +170,7 @@ impl Env { server_processes: None, metasrv_process: None, frontend_process: None, + flownode_process: None, ctx: GreptimeDBContext { time: 0, datanode_id: Default::default(), @@ -192,6 +197,8 @@ impl Env { db_ctx.incr_datanode_id(); format!("greptime-sqlness-datanode-{}.log", db_ctx.datanode_id()) } + // The flownode id is always 0 for now + "flownode" => "greptime-sqlness-flownode.log".to_string(), "frontend" => "greptime-sqlness-frontend.log".to_string(), "metasrv" => "greptime-sqlness-metasrv.log".to_string(), "standalone" => "greptime-sqlness-standalone.log".to_string(), @@ -211,6 +218,7 @@ impl Env { let (args, check_ip_addr) = match subcommand { "datanode" => self.datanode_start_args(db_ctx), + "flownode" => self.flownode_start_args(db_ctx), "standalone" => { let args = vec![ DEFAULT_LOG_LEVEL.to_string(), @@ -307,6 +315,22 @@ impl Env { (args, format!("127.0.0.1:410{id}")) } + fn flownode_start_args(&self, _db_ctx: &GreptimeDBContext) -> (Vec<String>, String) { + let id = 0; + + let subcommand = "flownode"; + let mut args = vec![ + DEFAULT_LOG_LEVEL.to_string(), + subcommand.to_string(), + "start".to_string(), + ]; + args.push(format!("--rpc-addr=127.0.0.1:680{id}")); + args.push(format!("--node-id={id}")); + args.push("--metasrv-addrs=127.0.0.1:3002".to_string()); + args.push("--frontend-addr=http://127.0.0.1:4001".to_string()); + (args, format!("127.0.0.1:680{id}")) + } + /// stop and restart the server process async fn restart_server(&self, db: &GreptimeDB) { { @@ -421,6 +445,7 @@ pub struct GreptimeDB { server_processes: Option<Arc<Mutex<Vec<Child>>>>, metasrv_process: Option<Child>, frontend_process: Option<Child>, + flownode_process: Option<Child>, client: TokioMutex<DB>, ctx: GreptimeDBContext, is_standalone: bool, @@ -516,6 +541,10 @@ impl GreptimeDB { Env::stop_server(&mut frontend); println!("Frontend (pid = {}) is stopped", frontend.id()); } + if let Some(mut flownode) = self.flownode_process.take() { + Env::stop_server(&mut flownode); + println!("Flownode (pid = {}) is stopped", flownode.id()); + } if matches!(self.ctx.wal, WalConfig::Kafka { needs_kafka_cluster, .. } if needs_kafka_cluster) { util::teardown_wal();
feat
make flow distributed work&tests (#4256)
97d2aa4bfd184fb81f2ba392f37ef15a69f08e96
2022-09-01 18:08:39
evenyag
feat: script engine and python impl (#219)
false
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index a5bae56e5fbe..779dba5854e5 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,7 +9,7 @@ on: name: Code coverage env: - RUST_TOOLCHAIN: nightly-2022-04-03 + RUST_TOOLCHAIN: nightly-2022-07-14 jobs: grcov: diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index 6e577ab8905d..4df745140407 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -5,7 +5,7 @@ on: name: Continuous integration for developing env: - RUST_TOOLCHAIN: nightly-2022-04-03 + RUST_TOOLCHAIN: nightly-2022-07-14 jobs: check: diff --git a/.gitignore b/.gitignore index 4e9d971ad199..9670cc7f2298 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,9 @@ debug/ # Logs **/__unittest_logs logs/ + +.DS_store +.gitignore + +# cpython's generated python byte code +**/__pycache__/ diff --git a/Cargo.lock b/Cargo.lock index db49f1d960db..d0ed1ef2da3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" + [[package]] name = "addr2line" version = "0.17.0" @@ -17,6 +23,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + [[package]] name = "ahash" version = "0.7.6" @@ -157,6 +169,12 @@ dependencies = [ "strength_reduce", ] +[[package]] +name = "ascii" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" + [[package]] name = "async-channel" version = "1.6.1" @@ -369,6 +387,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.59.2" @@ -527,6 +554,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +[[package]] +name = "caseless" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808dab3318747be122cb31d36de18d4d1c81277a76f8332a02b81a3d73463d7f" +dependencies = [ + "regex", + "unicode-normalization", +] + [[package]] name = "cast" version = "0.2.7" @@ -594,11 +631,13 @@ version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ + "js-sys", "libc", "num-integer", "num-traits", "serde", "time 0.1.43", + "wasm-bindgen", "winapi", ] @@ -652,16 +691,16 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.17" +version = "3.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47582c09be7c8b32c0ab3a6181825ababb713fde6fff20fc573a3870dd45c6a0" +checksum = "a3dbbb6653e7c55cc8595ad3e1f7be8f32aba4eb7ff7f0fd1163d4f3d137c0a9" dependencies = [ "atty", 
"bitflags", "clap_derive", "clap_lex", "indexmap", - "lazy_static", + "once_cell", "strsim 0.10.0", "termcolor", "textwrap 0.15.0", @@ -669,9 +708,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.1.7" +version = "3.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3aab4734e083b809aaf5794e14e756d1c798d2c69c7f7de7a09a2f5214993c1" +checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" dependencies = [ "heck 0.4.0", "proc-macro-error", @@ -682,9 +721,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" dependencies = [ "os_str_bytes", ] @@ -704,6 +743,17 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "clipboard-win" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4ab1b92798304eedc095b53942963240037c0516452cb11aeba709d420b2219" +dependencies = [ + "error-code", + "str-buf", + "winapi", +] + [[package]] name = "cmake" version = "0.1.48" @@ -717,7 +767,7 @@ dependencies = [ name = "cmd" version = "0.1.0" dependencies = [ - "clap 3.1.17", + "clap 3.2.16", "common-error", "common-telemetry", "datanode", @@ -734,8 +784,8 @@ version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b103d85ca6e209388771bfb7aa6b68a7aeec4afbf6f0a0264bfbf50360e5212e" dependencies = [ - "strum", - "strum_macros", + "strum 0.23.0", + "strum_macros 0.23.1", "unicode-width", ] @@ -769,10 +819,19 @@ dependencies = [ "common-query", "datafusion-common", "datatypes", + "libc", "num", "num-traits", "once_cell", "paste", + "ron", + "rustpython-ast", + "rustpython-bytecode", + "rustpython-compiler", + "rustpython-compiler-core", + "rustpython-parser", + "rustpython-vm", + "serde", "snafu", "statrs", ] @@ -876,6 +935,20 @@ dependencies = [ "cache-padded", ] +[[package]] +name = "console" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89eab4d20ce20cea182308bca13088fecea9c05f6776cf287205d41a0ed3c847" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "terminal_size", + "unicode-width", + "winapi", +] + [[package]] name = "console-api" version = "0.2.0" @@ -1200,7 +1273,7 @@ dependencies = [ "datafusion-expr", "datafusion-physical-expr", "futures", - "hashbrown 0.12.1", + "hashbrown", "lazy_static", "log", "num_cpus", @@ -1251,7 +1324,7 @@ dependencies = [ "chrono", "datafusion-common", "datafusion-expr", - "hashbrown 0.12.1", + "hashbrown", "lazy_static", "md-5", "ordered-float 2.10.0", @@ -1289,6 +1362,7 @@ dependencies = [ "metrics 0.20.1", "object-store", "query", + "script", "serde", "serde_json", "servers", @@ -1376,6 +1450,16 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + [[package]] name = "dirs-sys" version = "0.3.7" @@ -1387,6 +1471,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + 
"libc", + "redox_users", + "winapi", +] + [[package]] name = "dlv-list" version = "0.3.0" @@ -1405,6 +1500,12 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.31" @@ -1414,6 +1515,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + [[package]] name = "enum_dispatch" version = "0.3.8" @@ -1426,12 +1533,49 @@ dependencies = [ "syn", ] +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "error-code" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f18991e7bf11e7ffee451b5318b5c1a73c52d0d0ada6e5a3017c8c1ced6a21" +dependencies = [ + "libc", + "str-buf", +] + [[package]] name = "event-listener" version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" +[[package]] +name = "exitcode" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193" + [[package]] name = "fallible-streaming-iterator" version = "0.1.9" @@ -1447,6 +1591,17 @@ dependencies = [ "instant", ] +[[package]] +name = "fd-lock" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e11dcc7e4d79a8c89b9ab4c6f5c30b1fc4a83c420792da3542fd31179ed5f517" +dependencies = [ + "cfg-if", + "rustix", + "windows-sys", +] + [[package]] name = "fixedbitset" version = "0.4.1" @@ -1713,8 +1868,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.10.2+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -1760,12 +1917,6 @@ version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74721d007512d0cb3338cd20f0654ac913920061a4c4d0d8708edb3f2a698c0c" -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" - [[package]] name = "hashbrown" version = "0.12.1" @@ -1819,6 +1970,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hexf-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df" + [[package]] name = "hmac" version = "0.12.1" @@ -1942,12 +2099,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.11.2", + "hashbrown", ] [[package]] @@ -1969,6 +2126,12 @@ dependencies = [ "futures-util", ] +[[package]] +name = "io-lifetimes" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c3f4eff5495aee4c0399d7b6a0dc2b6e81be84242ffbfcf253ebacccc1d0cb" + [[package]] name = "ipnet" version = "2.5.0" @@ -1984,6 +2147,19 @@ dependencies = [ "nom", ] +[[package]] +name = "is-macro" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c068d4c6b922cd6284c609cfa6dec0e41615c9c5a1a4ba729a970d8daba05fb" +dependencies = [ + "Inflector", + "pmutil", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "isahc" version = "1.7.2" @@ -2064,6 +2240,12 @@ dependencies = [ "simple_asn1", ] +[[package]] +name = "lalrpop-util" +version = "0.19.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf796c978e9b4d983414f4caedc9273aa33ee214c5b887bd55fde84c85d2dc4" + [[package]] name = "lazy_static" version = "1.4.0" @@ -2151,9 +2333,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.125" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libloading" @@ -2193,6 +2375,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "linux-raw-sys" +version = "0.0.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" + [[package]] name = "lock_api" version = "0.4.7" @@ -2247,7 +2435,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown 0.12.1", + "hashbrown", ] [[package]] @@ -2270,6 +2458,15 @@ dependencies = [ "libc", ] +[[package]] +name = "lz4_flex" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74141c8af4bb8136dafb5705826bdd9dce823021db897c1129191804140ddf84" +dependencies = [ + "twox-hash", +] + [[package]] name = "mach" version = "0.3.2" @@ -2279,6 +2476,12 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + [[package]] name = "matchers" version = "0.1.0" @@ -2405,7 +2608,7 @@ checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.12.1", + "hashbrown", "metrics 0.20.1", "num_cpus", "parking_lot 0.12.0", @@ -2639,6 +2842,28 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nibble_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" 
+dependencies = [ + "smallvec", +] + +[[package]] +name = "nix" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +dependencies = [ + "bitflags", + "cc", + "cfg-if", + "libc", + "memoffset", +] + [[package]] name = "nom" version = "7.1.1" @@ -2672,6 +2897,7 @@ dependencies = [ "autocfg", "num-integer", "num-traits", + "serde", ] [[package]] @@ -2681,6 +2907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790" dependencies = [ "num-traits", + "serde", ] [[package]] @@ -2736,6 +2963,27 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf5395665662ef45796a4ff5486c5d41d29e0c09640af4c5f17fd94ee2c119c9" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "num_threads" version = "0.1.6" @@ -2913,6 +3161,12 @@ dependencies = [ "opentelemetry", ] +[[package]] +name = "optional" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978aa494585d3ca4ad74929863093e87cac9790d81fe7aba2b3dc2890643a0fc" + [[package]] name = "ordered-float" version = "1.1.1" @@ -2948,7 +3202,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" dependencies = [ "dlv-list", - "hashbrown 0.12.1", + "hashbrown", ] [[package]] @@ -3205,6 +3459,17 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "pmutil" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3894e5d549cccbe44afecf72922f277f603cd4bb0219c8342631ef18fffbe004" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "polling" version = "2.2.0" @@ -3240,6 +3505,16 @@ dependencies = [ "syn", ] +[[package]] +name = "proc-macro-crate" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3453,6 +3728,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + "nibble_vec", +] + [[package]] name = "rand" version = "0.4.6" @@ -3591,9 +3876,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.5" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -3611,9 +3896,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -3690,6 +3975,28 @@ dependencies = [ "winreg", ] +[[package]] +name = "result-like" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f95d927de9fa384eaf3e5b10e86065dd0a8a272b61cede64ffe7e83d2827073c" +dependencies = [ + "result-like-derive", +] + +[[package]] +name = "result-like-derive" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dac91550a14a4b4ec485260b40d83b25059130f564d7f598604e0c7b1a8b9e6" +dependencies = [ + "pmutil", + "proc-macro2", + "quote", + "syn", + "syn-ext", +] + [[package]] name = "retain_mut" version = "0.1.7" @@ -3711,6 +4018,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64", + "bitflags", + "serde", +] + [[package]] name = "rust-ini" version = "0.18.0" @@ -3754,29 +4072,266 @@ dependencies = [ ] [[package]] -name = "rustversion" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" - -[[package]] -name = "ryu" -version = "1.0.9" +name = "rustix" +version = "0.35.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "d51cc38aa10f6bbb377ed28197aa052aa4e2b762c22be9d3153d01822587e787" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys", +] [[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +name = "rustpython-ast" +version = "0.1.0" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" dependencies = [ - "winapi-util", + "num-bigint", + "rustpython-common", ] [[package]] -name = "saturating" -version = "0.1.0" +name = "rustpython-bytecode" +version = "0.1.2" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "bincode", + "bitflags", + "bstr", + "itertools", + "lz4_flex", + "num-bigint", + "num-complex", + "serde", + "static_assertions", +] + +[[package]] +name = "rustpython-common" +version = "0.0.0" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "ascii", + "cfg-if", + "hexf-parse", + "lexical-parse-float", + "libc", + "lock_api", + "num-bigint", + "num-complex", + "num-traits", + "once_cell", + "radium", + "rand 0.8.5", + "siphasher", + "unic-ucd-category", + "volatile", + "widestring", +] + +[[package]] +name = "rustpython-compiler" +version = "0.1.2" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "rustpython-bytecode", + "rustpython-compiler-core", + "rustpython-parser", + "thiserror", +] + +[[package]] +name = "rustpython-compiler-core" +version = "0.1.2" +source = 
"git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "ahash", + "indexmap", + "itertools", + "log", + "num-complex", + "num-traits", + "rustpython-ast", + "rustpython-bytecode", +] + +[[package]] +name = "rustpython-derive" +version = "0.1.2" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "indexmap", + "itertools", + "maplit", + "once_cell", + "proc-macro2", + "quote", + "rustpython-bytecode", + "rustpython-compiler", + "rustpython-doc", + "syn", + "syn-ext", + "textwrap 0.15.0", +] + +[[package]] +name = "rustpython-doc" +version = "0.1.0" +source = "git+https://github.com/RustPython/__doc__?branch=main#66be54cd61cc5eb29bb4870314160c337a296a32" +dependencies = [ + "once_cell", +] + +[[package]] +name = "rustpython-parser" +version = "0.1.2" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "ahash", + "lalrpop-util", + "log", + "num-bigint", + "num-traits", + "phf", + "phf_codegen", + "rustpython-ast", + "tiny-keccak", + "unic-emoji-char", + "unic-ucd-ident", + "unicode_names2", +] + +[[package]] +name = "rustpython-pylib" +version = "0.1.0" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" + +[[package]] +name = "rustpython-vm" +version = "0.1.2" +source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3" +dependencies = [ + "adler32", + "ahash", + "ascii", + "atty", + "bitflags", + "bstr", + "caseless", + "cfg-if", + "chrono", + "crossbeam-utils", + "exitcode", + "flate2", + "getrandom", + "half", + "hex", + "hexf-parse", + "indexmap", + "is-macro", + "itertools", + "libc", + "log", + "memchr", + "memoffset", + "nix", + "num-bigint", + "num-complex", + "num-integer", + "num-rational", + "num-traits", + "num_cpus", + "num_enum", + "once_cell", + "optional", + "parking_lot 0.12.0", + "paste", + "rand 0.8.5", + "result-like", + "rustc_version", + "rustpython-ast", + "rustpython-bytecode", + "rustpython-common", + "rustpython-compiler", + "rustpython-compiler-core", + "rustpython-derive", + "rustpython-parser", + "rustpython-pylib", + "rustyline", + "schannel", + "serde", + "sre-engine", + "static_assertions", + "strum 0.24.1", + "strum_macros 0.24.2", + "thiserror", + "thread_local", + "timsort", + "uname", + "unic-ucd-bidi", + "unic-ucd-category", + "unic-ucd-ident", + "unicode-casing", + "unicode_names2", + "wasm-bindgen", + "which", + "widestring", + "winapi", + "winreg", +] + +[[package]] +name = "rustversion" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" + +[[package]] +name = "rustyline" +version = "9.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db7826789c0e25614b03e5a54a0717a86f9ff6e6e5247f92b369472869320039" +dependencies = [ + "bitflags", + "cfg-if", + "clipboard-win", + "dirs-next", + "fd-lock", + "libc", + "log", + "memchr", + "nix", + "radix_trie", + "scopeguard", + "smallvec", + "unicode-segmentation", + "unicode-width", + "utf8parse", + "winapi", +] + +[[package]] +name = "ryu" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" + +[[package]] +name = "same-file" +version = 
"1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "saturating" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" @@ -3796,6 +4351,40 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "script" +version = "0.1.0" +dependencies = [ + "async-trait", + "catalog", + "common-error", + "common-function", + "common-query", + "common-recordbatch", + "console", + "datafusion", + "datafusion-common", + "datafusion-expr", + "datafusion-physical-expr", + "datatypes", + "futures", + "futures-util", + "query", + "ron", + "rustpython-ast", + "rustpython-bytecode", + "rustpython-compiler", + "rustpython-compiler-core", + "rustpython-parser", + "rustpython-vm", + "serde", + "snafu", + "sql", + "table", + "tokio", + "tokio-test", +] + [[package]] name = "security-framework" version = "2.6.1" @@ -3902,6 +4491,7 @@ dependencies = [ "opensrv-mysql", "query", "rand 0.8.5", + "script", "serde", "serde_json", "snafu", @@ -4088,6 +4678,16 @@ dependencies = [ "log", ] +[[package]] +name = "sre-engine" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5872399287c284fed4bc773cb7f6041623ac88213774f5e11e89e2131681fc1" +dependencies = [ + "bitflags", + "num_enum", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -4175,6 +4775,12 @@ dependencies = [ "tokio", ] +[[package]] +name = "str-buf" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" + [[package]] name = "streaming-decompression" version = "0.1.0" @@ -4223,6 +4829,12 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cae14b91c7d11c9a851d3fbc80a963198998c2a64eec840477fa92d8ce9b70bb" +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" + [[package]] name = "strum_macros" version = "0.23.1" @@ -4236,6 +4848,19 @@ dependencies = [ "syn", ] +[[package]] +name = "strum_macros" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" +dependencies = [ + "heck 0.4.0", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + [[package]] name = "subprocess" version = "0.2.9" @@ -4263,6 +4888,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "syn-ext" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b86cb2b68c5b3c078cac02588bc23f3c04bb828c5d3aedd17980876ec6a7be6" +dependencies = [ + "syn", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -4354,6 +4988,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "test-util" version = "0.1.0" @@ -4476,6 +5120,21 @@ version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +[[package]] +name = "timsort" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cb4fa83bb73adf1c7219f4fe4bf3c0ac5635e4e51e070fad5df745a41bedfb8" + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -4884,7 +5543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "rand 0.8.5", + "rand 0.4.6", "static_assertions", ] @@ -4894,6 +5553,15 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "uname" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72f89f0ca32e4db1c04e2a72f5345d59796d4866a1ee0609084569f73683dc8" +dependencies = [ + "libc", +] + [[package]] name = "uncased" version = "0.9.7" @@ -4903,6 +5571,81 @@ dependencies = [ "version_check", ] +[[package]] +name = "unic-char-property" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221" +dependencies = [ + "unic-char-range", +] + +[[package]] +name = "unic-char-range" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc" + +[[package]] +name = "unic-common" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc" + +[[package]] +name = "unic-emoji-char" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b07221e68897210270a38bde4babb655869637af0f69407f96053a34f76494d" +dependencies = [ + "unic-char-property", + "unic-char-range", + "unic-ucd-version", +] + +[[package]] +name = "unic-ucd-bidi" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1d568b51222484e1f8209ce48caa6b430bf352962b877d592c29ab31fb53d8c" +dependencies = [ + "unic-char-property", + "unic-char-range", + "unic-ucd-version", +] + +[[package]] +name = "unic-ucd-category" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8d4591f5fcfe1bd4453baaf803c40e1b1e69ff8455c47620440b46efef91c0" +dependencies = [ + "matches", + "unic-char-property", + "unic-char-range", + "unic-ucd-version", +] + +[[package]] +name = "unic-ucd-ident" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e230a37c0381caa9219d67cf063aa3a375ffed5bf541a452db16e744bdab6987" +dependencies = [ + "unic-char-property", + "unic-char-range", + "unic-ucd-version", +] + +[[package]] +name = "unic-ucd-version" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4" +dependencies = [ + "unic-common", +] + [[package]] name = "unicase" version = "2.6.0" @@ -4918,6 +5661,12 @@ version = "0.3.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +[[package]] +name = "unicode-casing" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "623f59e6af2a98bdafeb93fa277ac8e1e40440973001ca15cf4ae1541cd16d56" + [[package]] name = "unicode-normalization" version = "0.1.19" @@ -4945,6 +5694,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +[[package]] +name = "unicode_names2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eec8e807a365e5c972debc47b8f06d361b37b94cfd18d48f7adc715fb86404dd" + [[package]] name = "untrusted" version = "0.7.1" @@ -4963,6 +5718,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf8parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372" + [[package]] name = "uuid" version = "1.1.2" @@ -4996,6 +5757,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "volatile" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8e76fae08f03f96e166d2dfda232190638c10e0383841252416f9cfe2ae60e6" + [[package]] name = "waker-fn" version = "1.1.0" @@ -5137,6 +5904,12 @@ dependencies = [ "libc", ] +[[package]] +name = "widestring" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 4c9782eec055..0b1bd2b318c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ members = [ "src/logical-plans", "src/object-store", "src/query", + "src/script", "src/servers", "src/sql", "src/storage", diff --git a/component/script/python/__init__.py b/component/script/python/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/component/script/python/example/__init__.py b/component/script/python/example/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/component/script/python/example/calc_rv.py b/component/script/python/example/calc_rv.py new file mode 100644 index 000000000000..934503324132 --- /dev/null +++ b/component/script/python/example/calc_rv.py @@ -0,0 +1,69 @@ +import sys +# for annoying releative import beyond top-level package +sys.path.insert(0, "../") +from greptime import mock_tester, coprocessor, greptime as gt_builtin +from greptime.greptime import interval, vector, log, prev, sqrt, datetime +import greptime.greptime as greptime +import json +import numpy as np + + +def data_sample(k_lines, symbol, density=5 * 30 * 86400): + """ + Only return close data for simplicty for now + """ + k_lines = k_lines["result"] if k_lines["ret_msg"] == "OK" else None + if k_lines is None: + raise Exception("Expect a `OK`ed message") + close = [float(i["close"]) for i in k_lines] + + return interval(close, density, "prev") + + +def as_table(kline: list): + col_len = len(kline) + ret = { + k: vector([fn(row[k]) for row in kline], str(ty)) + for k, fn, ty in + [ + ("symbol", str, "str"), + ("period", str, "str"), + ("open_time", int, "int"), + 
("open", float, "float"), + ("high", float, "float"), + ("low", float, "float"), + ("close", float, "float") + ] + } + return ret + +@coprocessor(args=["open_time", "close"], returns=[ + "rv_7d", + "rv_15d", + "rv_30d", + "rv_60d", + "rv_90d", + "rv_180d" +], +sql="select open_time, close from k_line") +def calc_rvs(open_time, close): + from greptime import vector, log, prev, sqrt, datetime, pow, sum + def calc_rv(close, open_time, time, interval): + mask = (open_time < time) & (open_time > time - interval) + close = close[mask] + + avg_time_interval = (open_time[-1] - open_time[0])/(len(open_time)-1) + ref = log(close/prev(close)) + var = sum(pow(ref, 2)/(len(ref)-1)) + return sqrt(var/avg_time_interval) + + # how to get env var, + # maybe through accessing scope and serde then send to remote? + timepoint = open_time[-1] + rv_7d = calc_rv(close, open_time, timepoint, datetime("7d")) + rv_15d = calc_rv(close, open_time, timepoint, datetime("15d")) + rv_30d = calc_rv(close, open_time, timepoint, datetime("30d")) + rv_60d = calc_rv(close, open_time, timepoint, datetime("60d")) + rv_90d = calc_rv(close, open_time, timepoint, datetime("90d")) + rv_180d = calc_rv(close, open_time, timepoint, datetime("180d")) + return rv_7d, rv_15d, rv_30d, rv_60d, rv_90d, rv_180d diff --git a/component/script/python/example/fetch_kline.sh b/component/script/python/example/fetch_kline.sh new file mode 100755 index 000000000000..e044a635509d --- /dev/null +++ b/component/script/python/example/fetch_kline.sh @@ -0,0 +1 @@ +curl "https://api.bybit.com/v2/public/index-price-kline?symbol=BTCUSD&interval=1&limit=$1&from=1581231260" > kline.json diff --git a/component/script/python/example/kline.json b/component/script/python/example/kline.json new file mode 100644 index 000000000000..9928fceca4ef --- /dev/null +++ b/component/script/python/example/kline.json @@ -0,0 +1,108 @@ +{ + "ret_code": 0, + "ret_msg": "OK", + "ext_code": "", + "ext_info": "", + "result": [ + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231300, + "open": "10107", + "high": "10109.34", + "low": "10106.71", + "close": "10106.79" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231360, + "open": "10106.79", + "high": "10109.27", + "low": "10105.92", + "close": "10106.09" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231420, + "open": "10106.09", + "high": "10108.75", + "low": "10104.66", + "close": "10108.73" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231480, + "open": "10108.73", + "high": "10109.52", + "low": "10106.07", + "close": "10106.38" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231540, + "open": "10106.38", + "high": "10109.48", + "low": "10104.81", + "close": "10106.95" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231600, + "open": "10106.95", + "high": "10109.48", + "low": "10106.6", + "close": "10107.55" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231660, + "open": "10107.55", + "high": "10109.28", + "low": "10104.68", + "close": "10104.68" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231720, + "open": "10104.68", + "high": "10109.18", + "low": "10104.14", + "close": "10108.8" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231780, + "open": "10108.8", + "high": "10117.36", + "low": "10108.8", + "close": "10115.96" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231840, + "open": "10115.96", + "high": "10119.19", + "low": 
"10115.96", + "close": "10117.08" + }, + { + "symbol": "BTCUSD", + "period": "1", + "open_time": 1581231900, + "open": "10117.08", + "high": "10120.73", + "low": "10116.96", + "close": "10120.43" + } + ], + "time_now": "1661225351.158190" +} diff --git a/component/script/python/greptime/__init__.py b/component/script/python/greptime/__init__.py new file mode 100644 index 000000000000..8db592523486 --- /dev/null +++ b/component/script/python/greptime/__init__.py @@ -0,0 +1,4 @@ +from .greptime import coprocessor, copr +from .greptime import vector, log, prev, sqrt, pow, datetime, sum +from .mock import mock_tester +from .cfg import set_conn_addr, get_conn_addr diff --git a/component/script/python/greptime/cfg.py b/component/script/python/greptime/cfg.py new file mode 100644 index 000000000000..4a1aa8dc83f4 --- /dev/null +++ b/component/script/python/greptime/cfg.py @@ -0,0 +1,11 @@ +GREPTIME_DB_CONN_ADDRESS = "localhost:3000" +"""The Global Variable for address for conntect to database""" + +def set_conn_addr(addr: str): + """set database address to given `addr`""" + global GREPTIME_DB_CONN_ADDRESS + GREPTIME_DB_CONN_ADDRESS = addr + +def get_conn_addr()->str: + global GREPTIME_DB_CONN_ADDRESS + return GREPTIME_DB_CONN_ADDRESS diff --git a/component/script/python/greptime/greptime.py b/component/script/python/greptime/greptime.py new file mode 100644 index 000000000000..8ac0a41c3b49 --- /dev/null +++ b/component/script/python/greptime/greptime.py @@ -0,0 +1,215 @@ +""" +Be note that this is a mock library, if not connected to database, +it can only run on mock data and mock function which is supported by numpy +""" +import functools +import numpy as np +import json +from urllib import request +import inspect +import requests + +from .cfg import set_conn_addr, get_conn_addr + +log = np.log +sum = np.nansum +sqrt = np.sqrt +pow = np.power +nan = np.nan + + +class TimeStamp(str): + """ + TODO: impl date time + """ + pass + + +class i32(int): + """ + For Python Coprocessor Type Annotation ONLY + A signed 32-bit integer. + """ + + def __repr__(self) -> str: + return "i32" + + +class i64(int): + """ + For Python Coprocessor Type Annotation ONLY + A signed 64-bit integer. + """ + + def __repr__(self) -> str: + return "i64" + + +class f32(float): + """ + For Python Coprocessor Type Annotation ONLY + A 32-bit floating point number. + """ + + def __repr__(self) -> str: + return "f32" + + +class f64(float): + """ + For Python Coprocessor Type Annotation ONLY + A 64-bit floating point number. + """ + + def __repr__(self) -> str: + return "f64" + + +class vector(np.ndarray): + """ + A compact Vector with all elements of same Data type. + """ + _datatype: str | None = None + + def __new__( + cls, + lst, + dtype=None + ) -> ...: + self = np.asarray(lst).view(cls) + self._datatype = dtype + return self + + def __str__(self) -> str: + return "vector({}, \"{}\")".format(super().__str__(), self.datatype()) + + def datatype(self): + return self._datatype + + def filter(self, lst_bool): + return self[lst_bool] + + +def prev(lst): + ret = np.zeros(len(lst)) + ret[1:] = lst[0:-1] + ret[0] = nan + return ret + + +def query(sql: str): + pass + + +def interval(arr: list, duration: int, fill, step: None | int = None, explicitOffset=False): + """ + Note that this is a mock function with same functionailty to the actual Python Coprocessor + `arr` is a vector of integral or temporal type. 
+ + `duration` is the length of sliding window + + `step` being the length when sliding window take a step + + `fill` indicate how to fill missing value: + - "prev": use previous + - "post": next + - "linear": linear interpolation, if not possible to interpolate certain types, fallback to prev + - "null": use null + - "none": do not interpolate + """ + if step is None: + step = duration + + tot_len = int(np.ceil(len(arr) // step)) + slices = np.zeros((tot_len, int(duration))) + for idx, start in enumerate(range(0, len(arr), step)): + slices[idx] = arr[start:(start + duration)] + return slices + + +def factor(unit: str) -> int: + if unit == "d": + return 24 * 60 * 60 + elif unit == "h": + return 60 * 60 + elif unit == "m": + return 60 + elif unit == "s": + return 1 + else: + raise Exception("Only d,h,m,s, found{}".format(unit)) + + +def datetime(input_time: str) -> int: + """ + support `d`(day) `h`(hour) `m`(minute) `s`(second) + + support format: + `12s` `7d` `12d2h7m` + """ + + prev = 0 + cur = 0 + state = "Num" + parse_res = [] + for idx, ch in enumerate(input_time): + if ch.isdigit(): + cur = idx + + if state != "Num": + parse_res.append((state, input_time[prev:cur], (prev, cur))) + prev = idx + state = "Num" + else: + cur = idx + if state != "Symbol": + parse_res.append((state, input_time[prev:cur], (prev, cur))) + prev = idx + state = "Symbol" + parse_res.append((state, input_time[prev:cur+1], (prev, cur+1))) + + cur_idx = 0 + res_time = 0 + while cur_idx < len(parse_res): + pair = parse_res[cur_idx] + if pair[0] == "Num": + val = int(pair[1]) + nxt = parse_res[cur_idx+1] + res_time += val * factor(nxt[1]) + cur_idx += 2 + else: + raise Exception("Two symbol in a row is impossible") + + return res_time + + +def coprocessor(args=None, returns=None, sql=None): + """ + The actual coprocessor, which will connect to database and update + whatever function decorated with `@coprocessor(args=[...], returns=[...], sql=...)` + """ + def decorator_copr(func): + @functools.wraps(func) + def wrapper_do_actual(*args, **kwargs): + if len(args)!=0 or len(kwargs)!=0: + raise Exception("Expect call with no arguements(for all args are given by coprocessor itself)") + source = inspect.getsource(func) + url = "http://{}/v1/scripts".format(get_conn_addr()) + print("Posting to {}".format(url)) + data = { + "script": source, + "engine": None, + } + + res = requests.post( + url, + headers={"Content-Type": "application/json"}, + json=data + ) + return res + return wrapper_do_actual + return decorator_copr + + +# make a alias for short +copr = coprocessor diff --git a/component/script/python/greptime/mock.py b/component/script/python/greptime/mock.py new file mode 100644 index 000000000000..fed5a21a47ac --- /dev/null +++ b/component/script/python/greptime/mock.py @@ -0,0 +1,82 @@ +""" +Note this is a mock library, if not connected to database, +it can only run on mock data and support by numpy +""" +from typing import Any +import numpy as np +from .greptime import i32,i64,f32,f64, vector, interval, query, prev, datetime, log, sum, sqrt, pow, nan, copr, coprocessor + +import inspect +import functools +import ast + + + +def mock_tester( + func, + env:dict, + table=None +): + """ + Mock tester helper function, + What it does is replace `@coprocessor` with `@mock_cpor` and add a keyword `env=env` + like `@mock_copr(args=...,returns=...,env=env)` + """ + code = inspect.getsource(func) + tree = ast.parse(code) + tree = HackyReplaceDecorator("env").visit(tree) + new_func = tree.body[0] + fn_name = new_func.name + + 
code_obj = compile(tree, "<embedded>", "exec") + exec(code_obj) + + ret = eval("{}()".format(fn_name)) + return ret + +def mock_copr(args, returns, sql=None, env:None|dict=None): + """ + This should not be used directly by user + """ + def decorator_copr(func): + @functools.wraps(func) + def wrapper_do_actual(*fn_args, **fn_kwargs): + + real_args = [env[name] for name in args] + ret = func(*real_args) + return ret + + return wrapper_do_actual + return decorator_copr + +class HackyReplaceDecorator(ast.NodeTransformer): + """ + This class accept a `env` dict for environment to extract args from, + and put `env` dict in the param list of `mock_copr` decorator, i.e: + + a `@copr(args=["a", "b"], returns=["c"])` with call like mock_helper(abc, env={"a":2, "b":3}) + + will be transform into `@mock_copr(args=["a", "b"], returns=["c"], env={"a":2, "b":3})` + """ + def __init__(self, env: str) -> None: + # just for add `env` keyword + self.env = env + + def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: + new_node = node + decorator_list = new_node.decorator_list + if len(decorator_list)!=1: + return node + + deco = decorator_list[0] + if deco.func.id!="coprocessor" and deco.func.id !="copr": + raise Exception("Expect a @copr or @coprocessor, found {}.".format(deco.func.id)) + deco.func = ast.Name(id="mock_copr", ctx=ast.Load()) + new_kw = ast.keyword(arg="env", value=ast.Name(id=self.env, ctx=ast.Load())) + deco.keywords.append(new_kw) + + # Tie up loose ends in the AST. + ast.copy_location(new_node, node) + ast.fix_missing_locations(new_node) + self.generic_visit(node) + return new_node diff --git a/component/script/python/test.py b/component/script/python/test.py new file mode 100644 index 000000000000..e1e32079536a --- /dev/null +++ b/component/script/python/test.py @@ -0,0 +1,55 @@ +from example.calc_rv import as_table, calc_rvs +from greptime import coprocessor, set_conn_addr, get_conn_addr, mock_tester +import sys +import json +import requests +''' +To run this script, you need to first start a http server of greptime, and +` +python3 component/script/python/test.py 地址:端口 +` + +''' +@coprocessor(sql='select number from numbers limit 10', args=['number'], returns=['n']) +def test(n): + return n+2 + +def init_table(close, open_time): + req_init = "/v1/sql?sql=create table k_line (close double, open_time bigint, TIME INDEX (open_time))" + print(get_db(req_init).text) + for c1, c2 in zip(close, open_time): + req = "/v1/sql?sql=INSERT INTO k_line(close, open_time) VALUES ({}, {})".format(c1, c2) + print(get_db(req).text) + print(get_db("/v1/sql?sql=select * from k_line").text) + +def get_db(req:str): + return requests.get("http://{}{}".format(get_conn_addr(), req)) + +if __name__ == "__main__": + if len(sys.argv)!=2: + raise Exception("Expect only one address as cmd's args") + set_conn_addr(sys.argv[1]) + res = test() + print(res.headers) + print(res.text) + with open("component/script/python/example/kline.json", "r") as kline_file: + kline = json.load(kline_file) + # vec = vector([1,2,3], int) + # print(vec, vec.datatype()) + table = as_table(kline["result"]) + # print(table) + close = table["close"] + open_time = table["open_time"] + init_table(close, open_time) + + # print(repr(close), repr(open_time)) + # print("calc_rv:", calc_rv(close, open_time, open_time[-1]+datetime("10m"), datetime("7d"))) + env = {"close":close, "open_time": open_time} + # print("env:", env) + print("Mock result:", mock_tester(calc_rvs, env=env)) + real = calc_rvs() + print(real) + try: + 
print(real.text["error"]) + except: + print(real.text) diff --git a/src/api/src/v1.rs b/src/api/src/v1.rs index b729dce3c3d9..059593da5c39 100644 --- a/src/api/src/v1.rs +++ b/src/api/src/v1.rs @@ -1,4 +1,4 @@ -#![allow(clippy::all)] +#![allow(clippy::derive_partial_eq_without_eq)] tonic::include_proto!("greptime.v1"); pub mod codec { diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml index fd94bf1104dc..e462b6be173d 100644 --- a/src/common/function/Cargo.toml +++ b/src/common/function/Cargo.toml @@ -1,14 +1,12 @@ [package] +edition = "2021" name = "common-function" version = "0.1.0" -edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies.arrow] -package = "arrow2" -version="0.10" features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"] - +package = "arrow2" +version = "0.10" [dependencies] arc-swap = "1.0" @@ -17,9 +15,20 @@ common-error = { path = "../error" } common-query = { path = "../query" } datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git" , branch = "arrow2" } datatypes = { path = "../../datatypes" } -num = "0.4.0" -num-traits = "0.2.14" +libc = "0.2" +num = "0.4" +num-traits = "0.2" once_cell = "1.10" paste = "1.0" +rustpython-ast = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-bytecode = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-compiler = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-compiler-core = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-parser = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-vm = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} snafu = { version = "0.7", features = ["backtraces"] } -statrs = "0.15" \ No newline at end of file +statrs = "0.15" + +[dev-dependencies] +ron = "0.7" +serde = {version = "1.0", features = ["derive"]} diff --git a/src/common/function/src/scalars/math/mod.rs b/src/common/function/src/scalars/math/mod.rs index d454b15ee623..bd9f60b85557 100644 --- a/src/common/function/src/scalars/math/mod.rs +++ b/src/common/function/src/scalars/math/mod.rs @@ -2,7 +2,7 @@ mod pow; use std::sync::Arc; -use pow::PowFunction; +pub use pow::PowFunction; use crate::scalars::function_registry::FunctionRegistry; diff --git a/src/common/recordbatch/src/error.rs b/src/common/recordbatch/src/error.rs index 383b848ba5a9..c4e5d41836d0 100644 --- a/src/common/recordbatch/src/error.rs +++ b/src/common/recordbatch/src/error.rs @@ -1,8 +1,8 @@ //! Error of record batch. use std::any::Any; +use common_error::ext::BoxedError; use common_error::prelude::*; - common_error::define_opaque_error!(Error); pub type Result<T> = std::result::Result<T, Error>; @@ -21,6 +21,12 @@ pub enum InnerError { #[snafu(backtrace)] source: datatypes::error::Error, }, + + #[snafu(display("External error, source: {}", source))] + External { + #[snafu(backtrace)] + source: BoxedError, + }, } impl ErrorExt for InnerError { @@ -28,6 +34,7 @@ impl ErrorExt for InnerError { match self { InnerError::NewDfRecordBatch { .. } => StatusCode::InvalidArguments, InnerError::DataTypes { .. 
} => StatusCode::Internal, + InnerError::External { source } => source.status_code(), } } diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml index e03447f78282..d6b45edb9735 100644 --- a/src/datanode/Cargo.toml +++ b/src/datanode/Cargo.toml @@ -10,6 +10,12 @@ package = "arrow2" version = "0.10" features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"] +[features] +default = ["python"] +python = [ + "dep:script" +] + [dependencies] api = { path = "../api" } async-trait = "0.1" @@ -30,6 +36,7 @@ log-store = { path = "../log-store" } metrics = "0.20" object-store = { path = "../object-store" } query = { path = "../query" } +script = { path = "../script", features = ["python"], optional = true } serde = "1.0" serde_json = "1.0" servers = { path = "../servers" } diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs index f2963584d2c9..9b36ff586014 100644 --- a/src/datanode/src/instance.rs +++ b/src/datanode/src/instance.rs @@ -23,6 +23,7 @@ use crate::error::{ UnsupportedExprSnafu, }; use crate::metric; +use crate::script::ScriptExecutor; use crate::server::grpc::handler::{build_err_result, ObjectResultBuilder}; use crate::server::grpc::insert::insertion_expr_to_request; use crate::server::grpc::plan::PhysicalPlanner; @@ -39,6 +40,7 @@ pub struct Instance { // Catalog list catalog_manager: CatalogManagerRef, physical_planner: PhysicalPlanner, + script_executor: ScriptExecutor, } pub type InstanceRef = Arc<Instance>; @@ -64,12 +66,14 @@ impl Instance { ); let factory = QueryEngineFactory::new(catalog_manager.clone()); let query_engine = factory.query_engine().clone(); + let script_executor = ScriptExecutor::new(query_engine.clone()); Ok(Self { query_engine: query_engine.clone(), sql_handler: SqlHandler::new(table_engine, catalog_manager.clone()), catalog_manager, physical_planner: PhysicalPlanner::new(query_engine), + script_executor, }) } @@ -251,6 +255,10 @@ impl SqlQueryHandler for Instance { }) .context(servers::error::ExecuteQuerySnafu { query }) } + + async fn execute_script(&self, script: &str) -> servers::error::Result<Output> { + self.script_executor.execute_script(script).await + } } #[async_trait] diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs index 4fe0ef90175e..91812978875a 100644 --- a/src/datanode/src/lib.rs +++ b/src/datanode/src/lib.rs @@ -4,6 +4,7 @@ pub mod datanode; pub mod error; pub mod instance; mod metric; +mod script; pub mod server; mod sql; #[cfg(test)] diff --git a/src/datanode/src/script.rs b/src/datanode/src/script.rs new file mode 100644 index 000000000000..f18aa73574b2 --- /dev/null +++ b/src/datanode/src/script.rs @@ -0,0 +1,70 @@ +use query::Output; +use query::QueryEngineRef; + +#[cfg(not(feature = "python"))] +mod dummy { + use super::*; + + pub struct ScriptExecutor; + + impl ScriptExecutor { + pub fn new(_query_engine: QueryEngineRef) -> Self { + Self {} + } + + pub async fn execute_script(&self, _script: &str) -> servers::error::Result<Output> { + servers::error::NotSupportedSnafu { feat: "script" }.fail() + } + } +} + +#[cfg(feature = "python")] +mod python { + use common_error::prelude::BoxedError; + use common_telemetry::logging::error; + use script::{ + engine::{CompileContext, EvalContext, Script, ScriptEngine}, + python::PyEngine, + }; + use snafu::ResultExt; + + use super::*; + + pub struct ScriptExecutor { + py_engine: PyEngine, + } + + impl ScriptExecutor { + pub fn new(query_engine: QueryEngineRef) -> Self { + Self { + py_engine: 
PyEngine::new(query_engine), + } + } + + pub async fn execute_script(&self, script: &str) -> servers::error::Result<Output> { + let py_script = self + .py_engine + .compile(script, CompileContext::default()) + .await + .map_err(|e| { + error!(e; "Instance failed to execute script"); + BoxedError::new(e) + }) + .context(servers::error::ExecuteScriptSnafu { script })?; + + py_script + .evaluate(EvalContext::default()) + .await + .map_err(|e| { + error!(e; "Instance failed to execute script"); + BoxedError::new(e) + }) + .context(servers::error::ExecuteScriptSnafu { script }) + } + } +} + +#[cfg(not(feature = "python"))] +pub use self::dummy::*; +#[cfg(feature = "python")] +pub use self::python::*; diff --git a/src/datanode/src/tests/http_test.rs b/src/datanode/src/tests/http_test.rs index e4e0d7617148..1d882dd764f0 100644 --- a/src/datanode/src/tests/http_test.rs +++ b/src/datanode/src/tests/http_test.rs @@ -1,9 +1,12 @@ +use std::net::SocketAddr; use std::sync::Arc; use axum::http::StatusCode; use axum::Router; use axum_test_helper::TestClient; +use servers::http::handler::ScriptExecution; use servers::http::HttpServer; +use servers::server::Server; use test_util::TestGuard; use crate::instance::Instance; @@ -23,7 +26,7 @@ async fn test_sql_api() { common_telemetry::init_default_ut_logging(); let (app, _guard) = make_test_app().await; let client = TestClient::new(app); - let res = client.get("/sql").send().await; + let res = client.get("/v1/sql").send().await; assert_eq!(res.status(), StatusCode::OK); let body = res.text().await; @@ -33,7 +36,7 @@ async fn test_sql_api() { ); let res = client - .get("/sql?sql=select * from numbers limit 10") + .get("/v1/sql?sql=select * from numbers limit 10") .send() .await; assert_eq!(res.status(), StatusCode::OK); @@ -46,14 +49,14 @@ async fn test_sql_api() { // test insert and select let res = client - .get("/sql?sql=insert into demo values('host', 66.6, 1024, 0)") + .get("/v1/sql?sql=insert into demo values('host', 66.6, 1024, 0)") .send() .await; assert_eq!(res.status(), StatusCode::OK); // select * let res = client - .get("/sql?sql=select * from demo limit 10") + .get("/v1/sql?sql=select * from demo limit 10") .send() .await; assert_eq!(res.status(), StatusCode::OK); @@ -66,7 +69,7 @@ async fn test_sql_api() { // select with projections let res = client - .get("/sql?sql=select cpu, ts from demo limit 10") + .get("/v1/sql?sql=select cpu, ts from demo limit 10") .send() .await; assert_eq!(res.status(), StatusCode::OK); @@ -87,7 +90,7 @@ async fn test_metrics_api() { // Send a sql let res = client - .get("/sql?sql=select * from numbers limit 10") + .get("/v1/sql?sql=select * from numbers limit 10") .send() .await; assert_eq!(res.status(), StatusCode::OK); @@ -98,3 +101,50 @@ async fn test_metrics_api() { let body = res.text().await; assert!(body.contains("datanode_handle_sql_elapsed")); } + +#[tokio::test] +async fn test_scripts_api() { + common_telemetry::init_default_ut_logging(); + let (app, _guard) = make_test_app().await; + let client = TestClient::new(app); + let res = client + .post("/v1/scripts") + .json(&ScriptExecution { + script: r#" +@copr(sql='select number from numbers limit 10', args=['number'], returns=['n']) +def test(n): + return n; +"# + .to_string(), + }) + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); + + let body = res.text().await; + assert_eq!( + body, + 
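+        // The submitted script returns the `number` column (the first ten numbers,
+        // 0-9) unchanged as `n`, so the expected body is the JSON-serialized
+        // output: a single batch with one non-nullable UInt32 column.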
r#"{"success":true,"output":{"Rows":[{"schema":{"fields":[{"name":"n","data_type":"UInt32","is_nullable":false,"metadata":{}}],"metadata":{}},"columns":[[0,1,2,3,4,5,6,7,8,9]]}]}}"# + ); +} + +async fn start_test_app(addr: &str) -> (SocketAddr, TestGuard) { + let (opts, guard) = test_util::create_tmp_dir_and_datanode_opts(); + let instance = Arc::new(Instance::new(&opts).await.unwrap()); + instance.start().await.unwrap(); + let mut http_server = HttpServer::new(instance); + ( + http_server.start(addr.parse().unwrap()).await.unwrap(), + guard, + ) +} + +#[allow(unused)] +#[tokio::test] +async fn test_py_side_scripts_api() { + // TODO(discord9): make a working test case, it will require python3 with numpy installed, complex environment setup expected.... + common_telemetry::init_default_ut_logging(); + let server = start_test_app("127.0.0.1:21830"); + // let (app, _guard) = server.await; + // dbg!(app); +} diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml new file mode 100644 index 000000000000..a88007dd2f4f --- /dev/null +++ b/src/script/Cargo.toml @@ -0,0 +1,50 @@ +[package] +edition = "2021" +name = "script" +version = "0.1.0" + +[features] +default = ["python"] +python = [ + "dep:datafusion", + "dep:datafusion-expr", + "dep:datafusion-physical-expr", + "dep:rustpython-vm", + "dep:rustpython-parser", + "dep:rustpython-compiler", + "dep:rustpython-compiler-core", + "dep:rustpython-bytecode", + "dep:rustpython-ast", +] + +[dependencies] +async-trait = "0.1" +common-error = {path = "../common/error"} +common-function = { path = "../common/function" } +common-query = {path = "../common/query"} +common-recordbatch = {path = "../common/recordbatch" } +console = "0.15" +datafusion = {git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", optional = true} +datafusion-common = {git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2"} +datafusion-expr = {git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", optional = true} +datafusion-physical-expr = {git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", optional = true} +datatypes = {path = "../datatypes"} +futures-util = "0.3" +futures = "0.3" +query = { path = "../query" } +rustpython-ast = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-bytecode = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-compiler = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-compiler-core = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-parser = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +rustpython-vm = {git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d"} +snafu = {version = "0.7", features = ["backtraces"]} +sql = { path = "../sql" } + +[dev-dependencies] +catalog = { path = "../catalog" } +ron = "0.7" +serde = {version = "1.0", features = ["derive"]} +table = { path = "../table" } +tokio = { version = "1.18", features = ["full"] } +tokio-test = "0.4" diff --git a/src/script/src/engine.rs b/src/script/src/engine.rs new file mode 100644 index 000000000000..3c6850bbf656 --- /dev/null +++ b/src/script/src/engine.rs @@ -0,0 +1,46 @@ +//! 
Script engine + +use std::any::Any; + +use async_trait::async_trait; +use common_error::ext::ErrorExt; +use query::Output; + +#[async_trait] +pub trait Script { + type Error: ErrorExt + Send + Sync; + + /// Returns the script engine name such as `python` etc. + fn engine_name(&self) -> &str; + + fn as_any(&self) -> &dyn Any; + + /// Evaluate the script and returns the output. + async fn evaluate(&self, ctx: EvalContext) -> std::result::Result<Output, Self::Error>; +} + +#[async_trait] +pub trait ScriptEngine { + type Error: ErrorExt + Send + Sync; + type Script: Script<Error = Self::Error>; + + /// Returns the script engine name such as `python` etc. + fn name(&self) -> &str; + + fn as_any(&self) -> &dyn Any; + + /// Compile a script text into a script instance. + async fn compile( + &self, + script: &str, + ctx: CompileContext, + ) -> std::result::Result<Self::Script, Self::Error>; +} + +/// Evalute script context +#[derive(Debug, Default)] +pub struct EvalContext {} + +/// Compile script context +#[derive(Debug, Default)] +pub struct CompileContext {} diff --git a/src/script/src/lib.rs b/src/script/src/lib.rs new file mode 100644 index 000000000000..b11a118d8cd5 --- /dev/null +++ b/src/script/src/lib.rs @@ -0,0 +1,3 @@ +pub mod engine; +#[cfg(feature = "python")] +pub mod python; diff --git a/src/script/src/python.rs b/src/script/src/python.rs new file mode 100644 index 000000000000..05f7fc3374e3 --- /dev/null +++ b/src/script/src/python.rs @@ -0,0 +1,13 @@ +//! Python script coprocessor + +mod builtins; +pub(crate) mod coprocessor; +mod engine; +pub mod error; +#[cfg(test)] +mod test; +pub(crate) mod utils; +mod vector; + +pub use self::engine::{PyEngine, PyScript}; +pub use self::vector::PyVector; diff --git a/src/script/src/python/builtins/mod.rs b/src/script/src/python/builtins/mod.rs new file mode 100644 index 000000000000..f2f48a374f36 --- /dev/null +++ b/src/script/src/python/builtins/mod.rs @@ -0,0 +1,768 @@ +//! Builtin module contains GreptimeDB builtin udf/udaf +#[cfg(test)] +#[allow(clippy::print_stdout)] +mod test; + +use datafusion_common::{DataFusionError, ScalarValue}; +use datafusion_expr::ColumnarValue as DFColValue; +use datafusion_physical_expr::AggregateExpr; +use datatypes::arrow; +use datatypes::arrow::array::ArrayRef; +use datatypes::arrow::compute::cast::CastOptions; +use datatypes::arrow::datatypes::DataType; +use datatypes::vectors::Helper as HelperVec; +use rustpython_vm::builtins::PyList; +use rustpython_vm::pymodule; +use rustpython_vm::{ + builtins::{PyBaseExceptionRef, PyBool, PyFloat, PyInt}, + AsObject, PyObjectRef, PyPayload, PyResult, VirtualMachine, +}; + +use crate::python::utils::is_instance; +use crate::python::PyVector; + +/// "Can't cast operand of type `{name}` into `{ty}`." 
+fn type_cast_error(name: &str, ty: &str, vm: &VirtualMachine) -> PyBaseExceptionRef { + vm.new_type_error(format!("Can't cast operand of type `{name}` into `{ty}`.")) +} + +fn collect_diff_types_string(values: &[ScalarValue], ty: &DataType) -> String { + values + .iter() + .enumerate() + .filter_map(|(idx, val)| { + if val.get_datatype() != *ty { + Some((idx, val.get_datatype())) + } else { + None + } + }) + .map(|(idx, ty)| format!(" {:?} at {}th location\n", ty, idx + 1)) + .reduce(|mut acc, item| { + acc.push_str(&item); + acc + }) + .unwrap_or_else(|| "Nothing".to_string()) +} + +/// try to turn a Python Object into a PyVector or a scalar that can be use for calculate +/// +/// supported scalar are(leftside is python data type, right side is rust type): +/// +/// | Python | Rust | +/// | ------ | ---- | +/// | integer| i64 | +/// | float | f64 | +/// | bool | bool | +/// | vector | array| +/// | list | `ScalarValue::List` | +fn try_into_columnar_value(obj: PyObjectRef, vm: &VirtualMachine) -> PyResult<DFColValue> { + if is_instance::<PyVector>(&obj, vm) { + let ret = obj + .payload::<PyVector>() + .ok_or_else(|| type_cast_error(&obj.class().name(), "vector", vm))?; + Ok(DFColValue::Array(ret.to_arrow_array())) + } else if is_instance::<PyBool>(&obj, vm) { + // Note that a `PyBool` is also a `PyInt`, so check if it is a bool first to get a more precise type + let ret = obj.try_into_value::<bool>(vm)?; + Ok(DFColValue::Scalar(ScalarValue::Boolean(Some(ret)))) + } else if is_instance::<PyInt>(&obj, vm) { + let ret = obj.try_into_value::<i64>(vm)?; + Ok(DFColValue::Scalar(ScalarValue::Int64(Some(ret)))) + } else if is_instance::<PyFloat>(&obj, vm) { + let ret = obj.try_into_value::<f64>(vm)?; + Ok(DFColValue::Scalar(ScalarValue::Float64(Some(ret)))) + } else if is_instance::<PyList>(&obj, vm) { + let ret = obj + .payload::<PyList>() + .ok_or_else(|| type_cast_error(&obj.class().name(), "vector", vm))?; + let ret: Vec<ScalarValue> = ret + .borrow_vec() + .iter() + .map(|obj| -> PyResult<ScalarValue> { + let col = try_into_columnar_value(obj.to_owned(), vm)?; + match col { + DFColValue::Array(arr) => Err(vm.new_type_error(format!( + "Expect only scalar value in a list, found a vector of type {:?} nested in list", arr.data_type() + ))), + DFColValue::Scalar(val) => Ok(val), + } + }) + .collect::<Result<_, _>>()?; + + if ret.is_empty() { + //TODO(dennis): empty list, we set type as f64. 
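+            // e.g. a script passing a literal `[]` lands here: with no elements
+            // there is no type to infer, so Float64 is used as a placeholder.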
+ return Ok(DFColValue::Scalar(ScalarValue::List( + None, + Box::new(DataType::Float64), + ))); + } + + let ty = ret[0].get_datatype(); + if ret.iter().any(|i| i.get_datatype() != ty) { + return Err(vm.new_type_error(format!( + "All elements in a list should be same type to cast to Datafusion list!\nExpect {ty:?}, found {}", + collect_diff_types_string(&ret, &ty) + ))); + } + Ok(DFColValue::Scalar(ScalarValue::List( + Some(Box::new(ret)), + Box::new(ty), + ))) + } else { + Err(vm.new_type_error(format!( + "Can't cast object of type {} into vector or scalar", + obj.class().name() + ))) + } +} + +/// cast a columnar value into python object +/// +/// | Rust | Python | +/// | ------ | --------------- | +/// | Array | PyVector | +/// | Scalar | int/float/bool | +fn try_into_py_obj(col: DFColValue, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + match col { + DFColValue::Array(arr) => { + let ret = PyVector::from( + HelperVec::try_into_vector(arr) + .map_err(|err| vm.new_type_error(format!("Unsupported type: {:#?}", err)))?, + ) + .into_pyobject(vm); + Ok(ret) + } + DFColValue::Scalar(val) => scalar_val_try_into_py_obj(val, vm), + } +} + +/// turn a ScalarValue into a Python Object, currently support +/// +/// ScalarValue -> Python Type +/// - Float64 -> PyFloat +/// - Int64 -> PyInt +/// - UInt64 -> PyInt +/// - List -> PyList(of inner ScalarValue) +fn scalar_val_try_into_py_obj(val: ScalarValue, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + match val { + ScalarValue::Float32(Some(v)) => Ok(vm.ctx.new_float(v.into()).into()), + ScalarValue::Float64(Some(v)) => Ok(PyFloat::from(v).into_pyobject(vm)), + ScalarValue::Int64(Some(v)) => Ok(PyInt::from(v).into_pyobject(vm)), + ScalarValue::UInt64(Some(v)) => Ok(PyInt::from(v).into_pyobject(vm)), + ScalarValue::List(Some(col), _) => { + let list = col + .into_iter() + .map(|v| scalar_val_try_into_py_obj(v, vm)) + .collect::<Result<_, _>>()?; + let list = vm.ctx.new_list(list); + Ok(list.into()) + } + _ => Err(vm.new_type_error(format!( + "Can't cast a Scalar Value `{val:#?}` of type {:#?} to a Python Object", + val.get_datatype() + ))), + } +} + +/// Because most of the datafusion's UDF only support f32/64, so cast all to f64 to use datafusion's UDF +fn all_to_f64(col: DFColValue, vm: &VirtualMachine) -> PyResult<DFColValue> { + match col { + DFColValue::Array(arr) => { + let res = arrow::compute::cast::cast( + arr.as_ref(), + &DataType::Float64, + CastOptions { + wrapped: true, + partial: true, + }, + ) + .map_err(|err| { + vm.new_type_error(format!( + "Arrow Type Cast Fail(from {:#?} to {:#?}): {err:#?}", + arr.data_type(), + DataType::Float64 + )) + })?; + Ok(DFColValue::Array(res.into())) + } + DFColValue::Scalar(val) => { + let val_in_f64 = match val { + ScalarValue::Float64(Some(v)) => v, + ScalarValue::Int64(Some(v)) => v as f64, + ScalarValue::Boolean(Some(v)) => v as i64 as f64, + _ => { + return Err(vm.new_type_error(format!( + "Can't cast type {:#?} to {:#?}", + val.get_datatype(), + DataType::Float64 + ))) + } + }; + Ok(DFColValue::Scalar(ScalarValue::Float64(Some(val_in_f64)))) + } + } +} + +/// use to bind to Data Fusion's UDF function +/// P.S: seems due to proc macro issues, can't just use #[pyfunction] in here +macro_rules! 
bind_call_unary_math_function { + ($DF_FUNC: ident, $vm: ident $(,$ARG: ident)*) => { + fn inner_fn($($ARG: PyObjectRef,)* vm: &VirtualMachine) -> PyResult<PyObjectRef> { + let args = &[$(all_to_f64(try_into_columnar_value($ARG, vm)?, vm)?,)*]; + let res = math_expressions::$DF_FUNC(args).map_err(|err| from_df_err(err, vm))?; + let ret = try_into_py_obj(res, vm)?; + Ok(ret) + } + return inner_fn($($ARG,)* $vm); + }; +} + +/// The macro for binding function in `datafusion_physical_expr::expressions`(most of them are aggregate function) +/// +/// - first arguements is the name of datafusion expression function like `Avg` +/// - second is the python virtual machine ident `vm` +/// - following is the actual args passing in(as a slice).i.e.`&[values.to_arrow_array()]` +/// - the data type of passing in args, i.e: `Datatype::Float64` +/// - lastly ARE names given to expr of those function, i.e. `expr0, expr1,`.... +macro_rules! bind_aggr_fn { + ($AGGR_FUNC: ident, $VM: ident, $ARGS:expr, $DATA_TYPE: expr $(, $EXPR_ARGS: ident)*) => { + // just a place holder, we just want the inner `XXXAccumulator`'s function + // so its expr is irrelevant + return eval_aggr_fn( + expressions::$AGGR_FUNC::new( + $( + Arc::new(expressions::Column::new(stringify!($EXPR_ARGS), 0)) as _, + )* + stringify!($AGGR_FUNC), $DATA_TYPE.to_owned()), + $ARGS, $VM) + }; +} + +#[inline] +fn from_df_err(err: DataFusionError, vm: &VirtualMachine) -> PyBaseExceptionRef { + vm.new_runtime_error(format!("Data Fusion Error: {err:#?}")) +} + +/// evalute Aggregate Expr using its backing accumulator +fn eval_aggr_fn<T: AggregateExpr>( + aggr: T, + values: &[ArrayRef], + vm: &VirtualMachine, +) -> PyResult<PyObjectRef> { + // acquire the accumulator, where the actual implement of aggregate expr layers + let mut acc = aggr + .create_accumulator() + .map_err(|err| from_df_err(err, vm))?; + acc.update_batch(values) + .map_err(|err| from_df_err(err, vm))?; + let res = acc.evaluate().map_err(|err| from_df_err(err, vm))?; + scalar_val_try_into_py_obj(res, vm) +} + +/// GrepTime User Define Function module +/// +/// allow Python Coprocessor Function to use already implemented udf functions from datafusion and GrepTime DB itself +/// +#[pymodule] +pub(crate) mod greptime_builtin { + // P.S.: not extract to file because not-inlined proc macro attribute is *unstable* + use std::sync::Arc; + + use common_function::scalars::math::PowFunction; + use common_function::scalars::{function::FunctionContext, Function}; + use datafusion::physical_plan::expressions; + use datafusion_expr::ColumnarValue as DFColValue; + use datafusion_physical_expr::math_expressions; + use datatypes::arrow; + use datatypes::arrow::array::{ArrayRef, NullArray}; + use datatypes::arrow::compute; + use datatypes::vectors::{ConstantVector, Float64Vector, Helper, Int64Vector}; + use rustpython_vm::builtins::{PyFloat, PyInt, PyStr}; + use rustpython_vm::function::OptionalArg; + use rustpython_vm::{AsObject, PyObjectRef, PyResult, VirtualMachine}; + + use crate::python::builtins::{ + all_to_f64, eval_aggr_fn, from_df_err, try_into_columnar_value, try_into_py_obj, + type_cast_error, + }; + use crate::python::utils::is_instance; + use crate::python::utils::PyVectorRef; + use crate::python::PyVector; + + #[pyfunction] + fn vector(args: OptionalArg<PyObjectRef>, vm: &VirtualMachine) -> PyResult<PyVector> { + PyVector::new(args, vm) + } + + // the main binding code, due to proc macro things, can't directly use a simpler macro + // because pyfunction is not a attr? 
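+    // Illustrative only: a coprocessor script imports these bindings and calls
+    // them directly (the test cases use `from greptime import *`), e.g. with
+    // made-up column/table names:
+    //
+    //   from greptime import sqrt, pow
+    //
+    //   @copr(args=["cpu"], returns=["r"], sql="select cpu from metrics")
+    //   def r(cpu):
+    //       return sqrt(pow(cpu, 2))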
+ + // The math function return a general PyObjectRef + // so it can return both PyVector or a scalar PyInt/Float/Bool + + /// simple math function, the backing implement is datafusion's `sqrt` math function + #[pyfunction] + fn sqrt(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(sqrt, vm, val); + } + + /// simple math function, the backing implement is datafusion's `sin` math function + #[pyfunction] + fn sin(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(sin, vm, val); + } + + /// simple math function, the backing implement is datafusion's `cos` math function + #[pyfunction] + fn cos(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(cos, vm, val); + } + + /// simple math function, the backing implement is datafusion's `tan` math function + #[pyfunction] + fn tan(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(tan, vm, val); + } + + /// simple math function, the backing implement is datafusion's `asin` math function + #[pyfunction] + fn asin(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(asin, vm, val); + } + + /// simple math function, the backing implement is datafusion's `acos` math function + #[pyfunction] + fn acos(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(acos, vm, val); + } + + /// simple math function, the backing implement is datafusion's `atan` math function + #[pyfunction] + fn atan(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(atan, vm, val); + } + + /// simple math function, the backing implement is datafusion's `floor` math function + #[pyfunction] + fn floor(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(floor, vm, val); + } + /// simple math function, the backing implement is datafusion's `ceil` math function + #[pyfunction] + fn ceil(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(ceil, vm, val); + } + + /// simple math function, the backing implement is datafusion's `round` math function + #[pyfunction] + fn round(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(round, vm, val); + } + + /// simple math function, the backing implement is datafusion's `trunc` math function + #[pyfunction] + fn trunc(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(trunc, vm, val); + } + + /// simple math function, the backing implement is datafusion's `abs` math function + #[pyfunction] + fn abs(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(abs, vm, val); + } + + /// simple math function, the backing implement is datafusion's `signum` math function + #[pyfunction] + fn signum(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(signum, vm, val); + } + + /// simple math function, the backing implement is datafusion's `exp` math function + #[pyfunction] + fn exp(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(exp, vm, val); + } + + /// simple math function, the backing implement is datafusion's `ln` math function + #[pyfunction(name = "log")] + #[pyfunction] + fn ln(val: PyObjectRef, 
vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(ln, vm, val); + } + + /// simple math function, the backing implement is datafusion's `log2` math function + #[pyfunction] + fn log2(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(log2, vm, val); + } + + /// simple math function, the backing implement is datafusion's `log10` math function + #[pyfunction] + fn log10(val: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_call_unary_math_function!(log10, vm, val); + } + + /// return a random vector range from 0 to 1 and length of len + #[pyfunction] + fn random(len: usize, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + // This is in a proc macro so using full path to avoid strange things + // more info at: https://doc.rust-lang.org/reference/procedural-macros.html#procedural-macro-hygiene + let arg = NullArray::new(arrow::datatypes::DataType::Null, len); + let args = &[DFColValue::Array(std::sync::Arc::new(arg) as _)]; + let res = math_expressions::random(args).map_err(|err| from_df_err(err, vm))?; + let ret = try_into_py_obj(res, vm)?; + Ok(ret) + } + // UDAF(User Defined Aggregate Function) in datafusion + + #[pyfunction] + fn approx_distinct(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + ApproxDistinct, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + /// Not implement in datafusion + /// TODO(discord9): use greptime's own impl instead + /* + #[pyfunction] + fn approx_median(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + ApproxMedian, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + */ + + #[pyfunction] + fn approx_percentile_cont( + values: PyVectorRef, + percent: f64, + vm: &VirtualMachine, + ) -> PyResult<PyObjectRef> { + let percent = + expressions::Literal::new(datafusion_common::ScalarValue::Float64(Some(percent))); + return eval_aggr_fn( + expressions::ApproxPercentileCont::new( + vec![ + Arc::new(expressions::Column::new("expr0", 0)) as _, + Arc::new(percent) as _, + ], + "ApproxPercentileCont", + (values.to_arrow_array().data_type()).to_owned(), + ) + .map_err(|err| from_df_err(err, vm))?, + &[values.to_arrow_array()], + vm, + ); + } + + /// effectively equals to `list(vector)` + #[pyfunction] + fn array_agg(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + ArrayAgg, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + /// directly port from datafusion's `avg` function + #[pyfunction] + fn avg(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Avg, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn correlation( + arg0: PyVectorRef, + arg1: PyVectorRef, + vm: &VirtualMachine, + ) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Correlation, + vm, + &[arg0.to_arrow_array(), arg1.to_arrow_array()], + arg0.to_arrow_array().data_type(), + expr0, + expr1 + ); + } + + #[pyfunction] + fn count(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Count, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn covariance( + arg0: PyVectorRef, + arg1: PyVectorRef, + vm: &VirtualMachine, + ) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Covariance, + vm, 
+ &[arg0.to_arrow_array(), arg1.to_arrow_array()], + arg0.to_arrow_array().data_type(), + expr0, + expr1 + ); + } + + #[pyfunction] + fn covariance_pop( + arg0: PyVectorRef, + arg1: PyVectorRef, + vm: &VirtualMachine, + ) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + CovariancePop, + vm, + &[arg0.to_arrow_array(), arg1.to_arrow_array()], + arg0.to_arrow_array().data_type(), + expr0, + expr1 + ); + } + + #[pyfunction] + fn max(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Max, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn min(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Min, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn stddev(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Stddev, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn stddev_pop(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + StddevPop, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn sum(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Sum, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn variance(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + Variance, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + #[pyfunction] + fn variance_pop(values: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + bind_aggr_fn!( + VariancePop, + vm, + &[values.to_arrow_array()], + values.to_arrow_array().data_type(), + expr0 + ); + } + + /// Pow function, bind from gp's [`PowFunction`] + #[pyfunction] + fn pow(base: PyObjectRef, pow: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + let base = base + .payload::<PyVector>() + .ok_or_else(|| type_cast_error(&base.class().name(), "vector", vm))?; + let len_base = base.as_vector_ref().len(); + let arg_pow = if is_instance::<PyVector>(&pow, vm) { + let pow = pow + .payload::<PyVector>() + .ok_or_else(|| type_cast_error(&pow.class().name(), "vector", vm))?; + pow.as_vector_ref() + } else if is_instance::<PyFloat>(&pow, vm) { + let pow = pow.try_into_value::<f64>(vm)?; + let ret = + ConstantVector::new(Arc::new(Float64Vector::from_vec(vec![pow])) as _, len_base); + Arc::new(ret) as _ + } else if is_instance::<PyInt>(&pow, vm) { + let pow = pow.try_into_value::<i64>(vm)?; + let ret = + ConstantVector::new(Arc::new(Int64Vector::from_vec(vec![pow])) as _, len_base); + Arc::new(ret) as _ + } else { + return Err(vm.new_type_error(format!("Unsupported type({:#?}) for pow()", pow))); + }; + // pyfunction can return PyResult<...>, args can be like PyObjectRef or anything + // impl IntoPyNativeFunc, see rustpython-vm function for more details + let args = vec![base.as_vector_ref(), arg_pow]; + let res = PowFunction::default() + .eval(FunctionContext::default(), &args) + .unwrap(); + Ok(res.into()) + } + + // TODO: prev, sum, pow, sqrt, datetime, slice, and filter(through boolean array) + + /// TODO: for now prev(arr)[0] == arr[0], need better fill method + #[pyfunction] + fn prev(cur: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyVector> { + let cur: ArrayRef = 
cur.to_arrow_array(); + if cur.len() == 0 { + return Err( + vm.new_runtime_error("Can't give prev for a zero length array!".to_string()) + ); + } + let cur = cur.slice(0, cur.len() - 1); // except the last one that is + let fill = cur.slice(0, 1); + let ret = compute::concatenate::concatenate(&[&*fill, &*cur]).map_err(|err| { + vm.new_runtime_error(format!("Can't concat array[0] with array[0:-1]!{err:#?}")) + })?; + let ret = Helper::try_into_vector(&*ret).map_err(|e| { + vm.new_type_error(format!( + "Can't cast result into vector, result: {:?}, err: {:?}", + ret, e + )) + })?; + Ok(ret.into()) + } + + #[pyfunction] + fn datetime(input: &PyStr, vm: &VirtualMachine) -> PyResult<i64> { + let mut parsed = Vec::new(); + let mut prev = 0; + #[derive(Debug)] + enum State { + Num(i64), + Separator(String), + } + let mut state = State::Num(Default::default()); + let input = input.as_str(); + for (idx, ch) in input.chars().enumerate() { + match (ch.is_ascii_digit(), &state) { + (true, State::Separator(_)) => { + let res = &input[prev..idx]; + let res = State::Separator(res.to_owned()); + parsed.push(res); + prev = idx; + state = State::Num(Default::default()); + } + (false, State::Num(_)) => { + let res = str::parse(&input[prev..idx]).map_err(|err| { + vm.new_runtime_error(format!("Fail to parse num: {err:#?}")) + })?; + let res = State::Num(res); + parsed.push(res); + prev = idx; + state = State::Separator(Default::default()); + } + _ => continue, + }; + } + let last = match state { + State::Num(_) => { + let res = str::parse(&input[prev..]) + .map_err(|err| vm.new_runtime_error(format!("Fail to parse num: {err:#?}")))?; + State::Num(res) + } + State::Separator(_) => { + let res = &input[prev..]; + State::Separator(res.to_owned()) + } + }; + parsed.push(last); + let mut cur_idx = 0; + let mut tot_time = 0; + fn factor(unit: &str, vm: &VirtualMachine) -> PyResult<i64> { + let ret = match unit { + "d" => 24 * 60 * 60, + "h" => 60 * 60, + "m" => 60, + "s" => 1, + _ => return Err(vm.new_type_error(format!("Unknown time unit: {unit}"))), + }; + Ok(ret) + } + while cur_idx < parsed.len() { + match &parsed[cur_idx] { + State::Num(v) => { + if cur_idx + 1 > parsed.len() { + return Err(vm.new_runtime_error( + "Expect a spearator after number, found nothing!".to_string(), + )); + } + let nxt = &parsed[cur_idx + 1]; + if let State::Separator(sep) = nxt { + tot_time += v * factor(sep, vm)?; + } else { + return Err(vm.new_runtime_error(format!( + "Expect a spearator after number, found `{nxt:#?}`" + ))); + } + cur_idx += 2; + } + State::Separator(sep) => { + return Err(vm.new_runtime_error(format!("Expect a number, found `{sep}`"))) + } + } + } + Ok(tot_time) + } +} diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs new file mode 100644 index 000000000000..ff3adcaed5ea --- /dev/null +++ b/src/script/src/python/builtins/test.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; + +use arrow::array::PrimitiveArray; +use rustpython_vm::class::PyClassImpl; + +use super::*; +use crate::python::utils::format_py_error; +#[test] +fn convert_scalar_to_py_obj_and_back() { + rustpython_vm::Interpreter::with_init(Default::default(), |vm| { + // this can be in `.enter()` closure, but for clearity, put it in the `with_init()` + PyVector::make_class(&vm.ctx); + }) + .enter(|vm| { + let col = DFColValue::Scalar(ScalarValue::Float64(Some(1.0))); + let to = try_into_py_obj(col, vm).unwrap(); + let back = try_into_columnar_value(to, vm).unwrap(); + if let 
DFColValue::Scalar(ScalarValue::Float64(Some(v))) = back { + if (v - 1.0).abs() > 2.0 * f64::EPSILON { + panic!("Expect 1.0, found {v}") + } + } else { + panic!("Convert errors, expect 1.0") + } + let col = DFColValue::Scalar(ScalarValue::Int64(Some(1))); + let to = try_into_py_obj(col, vm).unwrap(); + let back = try_into_columnar_value(to, vm).unwrap(); + if let DFColValue::Scalar(ScalarValue::Int64(Some(v))) = back { + assert_eq!(v, 1); + } else { + panic!("Convert errors, expect 1") + } + let col = DFColValue::Scalar(ScalarValue::UInt64(Some(1))); + let to = try_into_py_obj(col, vm).unwrap(); + let back = try_into_columnar_value(to, vm).unwrap(); + if let DFColValue::Scalar(ScalarValue::Int64(Some(v))) = back { + assert_eq!(v, 1); + } else { + panic!("Convert errors, expect 1") + } + let col = DFColValue::Scalar(ScalarValue::List( + Some(Box::new(vec![ + ScalarValue::Int64(Some(1)), + ScalarValue::Int64(Some(2)), + ])), + Box::new(DataType::Int64), + )); + let to = try_into_py_obj(col, vm).unwrap(); + let back = try_into_columnar_value(to, vm).unwrap(); + if let DFColValue::Scalar(ScalarValue::List(Some(list), ty)) = back { + assert_eq!(list.len(), 2); + assert_eq!(ty.as_ref(), &DataType::Int64); + } + let list: Vec<PyObjectRef> = vec![vm.ctx.new_int(1).into(), vm.ctx.new_int(2).into()]; + let nested_list: Vec<PyObjectRef> = + vec![vm.ctx.new_list(list).into(), vm.ctx.new_int(3).into()]; + let list_obj = vm.ctx.new_list(nested_list).into(); + let col = try_into_columnar_value(list_obj, vm); + if let Err(err) = col { + let reason = format_py_error(err, vm); + assert!(format!("{}", reason).contains( + "TypeError: All elements in a list should be same type to cast to Datafusion list!" + )); + } + + let list: PyVector = PyVector::from( + HelperVec::try_into_vector( + Arc::new(PrimitiveArray::from_slice([0.1f64, 0.2, 0.3, 0.4])) as ArrayRef, + ) + .unwrap(), + ); + let nested_list: Vec<PyObjectRef> = vec![list.into_pyobject(vm), vm.ctx.new_int(3).into()]; + let list_obj = vm.ctx.new_list(nested_list).into(); + let expect_err = try_into_columnar_value(list_obj, vm); + assert!(expect_err.is_err()); + }) +} diff --git a/src/script/src/python/builtins/testcases.ron b/src/script/src/python/builtins/testcases.ron new file mode 100644 index 000000000000..2bc6e9fb7dc4 --- /dev/null +++ b/src/script/src/python/builtins/testcases.ron @@ -0,0 +1,784 @@ +// This is the file for UDF&UDAF binding from datafusion, +// including most test for those function(except ApproxMedian which datafusion didn't implement) +// check src/scalars/py_udf_module/test.rs for more information +[ + // math expressions + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "pows": Var( + ty: Int8, + value: IntVec([0, -1, 3]) + ) + }, + script: r#" +from greptime import * +sqrt(values)"#, + expect: Ok(( + value: FloatVec( + [ + 1.0, + 1.4142135623730951, + 1.7320508075688772, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +sin(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.8414709848078965, + 0.9092974268256817, + 0.1411200080598672, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +cos(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.5403023058681398, + -0.4161468365471424, + -0.9899924966004454, + ], + ), + ty: Float64 + )) + ), + 
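+    // Each case below follows the same shape: the `input` vars are exposed to the
+    // `script`, the one-line expression is evaluated against the greptime
+    // builtins, and the result is compared with `expect` (both value and type).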
TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +tan(values)"#, + expect: Ok(( + value: FloatVec( + [ + 1.557407724654902, + -2.185039863261519, + -0.1425465430742778, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.0]) + ) + }, + script: r#" +from greptime import * +asin(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.30469265401539747, + 0.5235987755982988, + 1.5707963267948966, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.0]) + ) + }, + script: r#" +from greptime import * +acos(values)"#, + expect: Ok(( + value: FloatVec( + [ + 1.266103672779499, + 1.0471975511965976, + 0.0, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.1]) + ) + }, + script: r#" +from greptime import * +atan(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.2914567944778671, + 0.46364760900080615, + 0.8329812666744317, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.1]) + ) + }, + script: r#" +from greptime import * +floor(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.0, + 0.0, + 1.0, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.1]) + ) + }, + script: r#" +from greptime import * +ceil(values)"#, + expect: Ok(( + value: FloatVec( + [ + 1.0, + 1.0, + 2.0, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.1]) + ) + }, + script: r#" +from greptime import * +round(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.0, + 1.0, + 1.0, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0.3, 0.5, 1.1]) + ) + }, + script: r#" +from greptime import * +trunc(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.0, + 0.0, + 1.0, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([-0.3, 0.5, -1.1]) + ) + }, + script: r#" +from greptime import * +abs(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.3, + 0.5, + 1.1, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([-0.3, 0.5, -1.1]) + ) + }, + script: r#" +from greptime import * +signum(values)"#, + expect: Ok(( + value: FloatVec( + [ + -1.0, + 1.0, + -1.0, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([0, 1.0, 2.0]) + ) + }, + script: r#" +from greptime import * +exp(values)"#, + expect: Ok(( + value: FloatVec( + [ + 1.0, + 2.718281828459045, + 7.38905609893065, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +ln(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.0, + 0.6931471805599453, + 1.0986122886681098, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +log2(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.0, + 1.0, + 1.584962500721156, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + 
script: r#" +from greptime import * +log10(values)"#, + expect: Ok(( + value: FloatVec( + [ + 0.0, + 0.3010299956639812, + 0.47712125471966244, + ], + ), + ty: Float64 + )) + ), + TestCase( + input: {}, + script: r#" +from greptime import * +random(42)"#, + expect: Ok(( + value: LenFloatVec(42), + ty: Float64 + )) + ), + +// UDAF(Aggerate function) +// approx function is indeterministic + TestCase( + input: { + "values": Var( + ty: Float64, + value: IntVec([1, 2, 2, 3]) + ) + }, + script: r#" +from greptime import * +approx_distinct(values)"#, + expect: Ok(( + value: Int(3), + ty: Float64 + )) + ), + // not impl in datafusion + /* + TestCase( + input: { + "values": Var( + ty: Float64, + value: IntVec([1, 2, 2, 3]) + ) + }, + script: r#" +from greptime import * +approx_median(values)"#, + expect: Ok(( + value: Int(2), + ty: Float64 + )) + ), + */ + TestCase( + input: { + "values": Var( + ty: Float64, + value: IntVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +approx_percentile_cont(values, 0.6)"#, + expect: Ok(( + value: Int(6), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +array_agg(values)"#, + expect: Ok(( + value: FloatVec([1.0, 2.0, 3.0]), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ) + }, + script: r#" +from greptime import * +avg(values)"#, + expect: Ok(( + value: Float(2.0), + ty: Float64 + )) + ), + TestCase( + input: { + "a": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "b": Var( + ty: Float64, + value: FloatVec([1.0, 0.0, -1.0]) + ), + }, + script: r#" +from greptime import * +correlation(a, b)"#, + expect: Ok(( + value: Float(-1.0), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: IntVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +count(values)"#, + expect: Ok(( + value: Int(10), + ty: Int64 + )) + ), + TestCase( + input: { + "a": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "b": Var( + ty: Float64, + value: FloatVec([1.0, 0.0, -1.0]) + ), + }, + script: r#" +from greptime import * +covariance(a, b)"#, + expect: Ok(( + value: Float(-1.0), + ty: Float64 + )) + ), + TestCase( + input: { + "a": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "b": Var( + ty: Float64, + value: FloatVec([1.0, 0.0, -1.0]) + ), + }, + script: r#" +from greptime import * +covariance_pop(a, b)"#, + expect: Ok(( + value: Float(-0.6666666666666666), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: IntVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +max(values)"#, + expect: Ok(( + value: Int(10), + ty: Int64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: IntVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +min(values)"#, + expect: Ok(( + value: Int(1), + ty: Int64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +stddev(values)"#, + expect: Ok(( + value: Float(3.0276503540974917), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +stddev_pop(values)"#, + expect: Ok(( + value: 
Float(2.8722813232690143), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +sum(values)"#, + expect: Ok(( + value: Float(55), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +variance(values)"#, + expect: Ok(( + value: Float(9.166666666666666), + ty: Float64 + )) + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + ) + }, + script: r#" +from greptime import * +variance_pop(values)"#, + expect: Ok(( + value: Float(8.25), + ty: Float64 + )) + ), + + +// GrepTime's own UDF + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "pows": Var( + ty: Int8, + value: IntVec([0, -1, 3]) + ) + }, + script: r#" +from greptime import * +pow(values, pows)"#, + expect: Ok(( + value: FloatVec([ 1.0, 0.5, 27.0]), + ty: Float64 + )) + ), + +// Error handling test + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "pows": Var( + ty: Int8, + value: IntVec([0, 0, 0]) + ) + }, + script: r#" +from greptime import * +pow(values, 1)"#, + expect: Err("TypeError: Can't cast operand of type `int` into `vector`.") + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "pows": Var( + ty: Int8, + value: IntVec([0, 0, 0]) + ), + "num": Var( + ty: Int64, + value: Int(1) + ) + }, + script: r#" +from greptime import * +pow(num, pows)"#, + expect: Err("TypeError: Can't cast operand of type `int` into `vector`") + ), + TestCase( + input: { + "values": Var( + ty: Float64, + value: FloatVec([1.0, 2.0, 3.0]) + ), + "pows": Var( + ty: Int8, + value: IntVec([0, 0, 0]) + ), + "num": Var( + ty: Int64, + value: Int(1) + ) + }, + script: r#" +from greptime import * +asin(num, pows)"#, + expect: Err("TypeError: Expected at most 1 arguments (2 given)") + ), + // Test Type Cast between float, int and bool + TestCase( + input: { + "num": Var( + ty: Int64, + value: Int(1) + ) + }, + script: r#" +from greptime import * +sin(num)"#, + expect: Ok(( + ty: Float64, + value: Float(0.8414709848078965) + )) + ), + TestCase( + input: { + "num": Var( + ty: Float64, + value: Float(1.0) + ) + }, + script: r#" +from greptime import * +sin(num)"#, + expect: Ok(( + ty: Float64, + value: Float(0.8414709848078965) + )) + ), + TestCase( + input: {}, + script: r#" +from greptime import * +sin(True)"#, + expect: Ok(( + ty: Float64, + value: Float(0.8414709848078965) + )) + ), + TestCase( + input: { + "num": Var( + ty: Boolean, + value: Bool(false) + ) + }, + script: r#" +from greptime import * +sin(num)"#, + expect: Ok(( + ty: Float64, + value: Float(0.0) + )) + ), + // test if string returns error correctly + TestCase( + input: { + "num": Var( + ty: Boolean, + value: Str("42") + ) + }, + script: r#" +from greptime import * +sin(num)"#, + expect: Err("Can't cast object of type str into vector or scalar") + ), +] diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs new file mode 100644 index 000000000000..7107e512771c --- /dev/null +++ b/src/script/src/python/coprocessor.rs @@ -0,0 +1,610 @@ +pub mod parse; + +use std::collections::HashMap; +use std::result::Result as StdResult; +use std::sync::Arc; + +use common_recordbatch::RecordBatch; +use 
datafusion_common::record_batch::RecordBatch as DfRecordBatch; +use datatypes::arrow; +use datatypes::arrow::array::{Array, ArrayRef, BooleanArray, PrimitiveArray}; +use datatypes::arrow::compute::cast::CastOptions; +use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema}; +use datatypes::schema::Schema; +use datatypes::vectors::Helper; +use datatypes::vectors::{BooleanVector, Vector, VectorRef}; +use rustpython_bytecode::CodeObject; +use rustpython_compiler_core::compile; +use rustpython_parser::{ + ast, + ast::{Located, Location}, + parser, +}; +use rustpython_vm as vm; +use rustpython_vm::{class::PyClassImpl, AsObject}; +use snafu::{OptionExt, ResultExt}; +use vm::builtins::{PyBaseExceptionRef, PyBool, PyFloat, PyInt, PyTuple}; +use vm::scope::Scope; +use vm::{Interpreter, PyObjectRef, VirtualMachine}; + +use crate::fail_parse_error; +use crate::python::builtins::greptime_builtin; +use crate::python::coprocessor::parse::{ret_parse_error, DecoratorArgs}; +use crate::python::error::{ + ensure, ArrowSnafu, CoprParseSnafu, OtherSnafu, PyCompileSnafu, PyParseSnafu, Result, + TypeCastSnafu, +}; +use crate::python::utils::format_py_error; +use crate::python::{utils::is_instance, PyVector}; + +fn ret_other_error_with(reason: String) -> OtherSnafu<String> { + OtherSnafu { reason } +} + +#[cfg(test)] +use serde::Deserialize; + +#[cfg_attr(test, derive(Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AnnotationInfo { + /// if None, use types infered by PyVector + pub datatype: Option<DataType>, + pub is_nullable: bool, +} + +pub type CoprocessorRef = Arc<Coprocessor>; + +#[cfg_attr(test, derive(Deserialize))] +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Coprocessor { + pub name: String, + pub deco_args: DecoratorArgs, + /// get from python function args' annotation, first is type, second is is_nullable + pub arg_types: Vec<Option<AnnotationInfo>>, + /// get from python function returns' annotation, first is type, second is is_nullable + pub return_types: Vec<Option<AnnotationInfo>>, + /// store its corresponding script, also skip serde when in `cfg(test)` to reduce work in compare + #[cfg_attr(test, serde(skip))] + pub script: String, +} + +impl Coprocessor { + /// generate a call to the coprocessor function + /// with arguments given in decorator's `args` list + /// also set in location in source code to `loc` + fn gen_call(&self, loc: &Location) -> ast::Stmt<()> { + let mut loc = loc.to_owned(); + // adding a line to avoid confusing if any error occurs when calling the function + // then the pretty print will point to the last line in code + // instead of point to any of existing code written by user. 
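+        // For example, for a coprocessor named `calc` declared with
+        // args=["a", "b"], the generated statement is equivalent to appending
+        // `calc(a, b)` as a new last line of the script (names are illustrative).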
+ loc.newline(); + let args: Vec<Located<ast::ExprKind>> = self + .deco_args + .arg_names + .iter() + .map(|v| { + let node = ast::ExprKind::Name { + id: v.to_owned(), + ctx: ast::ExprContext::Load, + }; + create_located(node, loc) + }) + .collect(); + let func = ast::ExprKind::Call { + func: Box::new(create_located( + ast::ExprKind::Name { + id: self.name.to_owned(), + ctx: ast::ExprContext::Load, + }, + loc, + )), + args, + keywords: Vec::new(), + }; + let stmt = ast::StmtKind::Expr { + value: Box::new(create_located(func, loc)), + }; + create_located(stmt, loc) + } + + /// check if `Mod` is of one line of statement + fn check_before_compile(top: &ast::Mod) -> Result<()> { + if let ast::Mod::Interactive { body: code } = top { + ensure!( + code.len() == 1, + CoprParseSnafu { + reason: format!( + "Expect only one statement in script, found {} statement", + code.len() + ), + loc: code.first().map(|s| s.location) + } + ); + + if let ast::StmtKind::FunctionDef { + name: _, + args: _, + body: _, + decorator_list: _, + returns: _, + type_comment: __main__, + } = &code[0].node + { + } else { + return fail_parse_error!( + format!("Expect the one and only statement in script as a function def, but instead found: {:?}", code[0].node), + Some(code[0].location) + ); + } + } else { + return fail_parse_error!( + format!("Expect statement in script, found: {:?}", top), + None, + ); + } + Ok(()) + } + + /// stripe the decorator(`@xxxx`) and type annotation(for type checker is done in rust function), add one line in the ast for call function with given parameter, and compiler into `CodeObject` + /// + /// The rationale is that rustpython's vm is not very efficient according to [offical benchmark](https://rustpython.github.io/benchmarks), + /// So we should avoid running too much Python Bytecode, hence in this function we delete `@` decorator(instead of actually write a decorator in python) + /// And add a function call in the end and also + /// strip type annotation + fn strip_append_and_compile(&self) -> Result<CodeObject> { + let script = &self.script; + // note that it's important to use `parser::Mode::Interactive` so the ast can be compile to return a result instead of return None in eval mode + let mut top = parser::parse(script, parser::Mode::Interactive).context(PyParseSnafu)?; + Self::check_before_compile(&top)?; + // erase decorator + if let ast::Mod::Interactive { body } = &mut top { + let code = body; + if let ast::StmtKind::FunctionDef { + name: _, + args, + body: _, + decorator_list, + returns, + type_comment: __main__, + } = &mut code[0].node + { + *decorator_list = Vec::new(); + // strip type annotation + // def a(b: int, c:int) -> int + // will became + // def a(b, c) + *returns = None; + for arg in &mut args.args { + arg.node.annotation = None; + } + } else { + // already done in check function + unreachable!() + } + let loc = code[0].location; + + // This manually construct ast has no corrsponding code + // in the script, so just give it a location that don't exist in orginal script + // (which doesn't matter because Location usually only used in pretty print errors) + code.push(self.gen_call(&loc)); + } else { + // already done in check function + unreachable!() + } + // use `compile::Mode::BlockExpr` so it return the result of statement + compile::compile_top( + &top, + "<embedded>".to_owned(), + compile::Mode::BlockExpr, + compile::CompileOpts { optimize: 0 }, + ) + .context(PyCompileSnafu) + } + + /// generate [`Schema`] according to return names, types, + /// if no annotation + /// 
the datatypes of the actual columns is used directly + fn gen_schema(&self, cols: &[ArrayRef]) -> Result<Arc<ArrowSchema>> { + let names = &self.deco_args.ret_names; + let anno = &self.return_types; + ensure!( + cols.len() == names.len() && names.len() == anno.len(), + OtherSnafu { + reason: format!( + "Unmatched length for cols({}), names({}) and anno({})", + cols.len(), + names.len(), + anno.len() + ) + } + ); + Ok(Arc::new(ArrowSchema::from( + names + .iter() + .enumerate() + .map(|(idx, name)| { + let real_ty = cols[idx].data_type().to_owned(); + let AnnotationInfo { + datatype: ty, + is_nullable, + } = anno[idx].to_owned().unwrap_or_else(|| + // default to be not nullable and use DataType infered by PyVector itself + AnnotationInfo{ + datatype: Some(real_ty.to_owned()), + is_nullable: false + }); + Field::new( + name, + // if type is like `_` or `_ | None` + ty.unwrap_or(real_ty), + is_nullable, + ) + }) + .collect::<Vec<Field>>(), + ))) + } + + /// check if real types and annotation types(if have) is the same, if not try cast columns to annotated type + fn check_and_cast_type(&self, cols: &mut [ArrayRef]) -> Result<()> { + let return_types = &self.return_types; + // allow ignore Return Type Annotation + if return_types.is_empty() { + return Ok(()); + } + ensure!( + cols.len() == return_types.len(), + OtherSnafu { + reason: format!( + "The number of return Vector is wrong, expect {}, found {}", + return_types.len(), + cols.len() + ) + } + ); + for (col, anno) in cols.iter_mut().zip(return_types) { + if let Some(AnnotationInfo { + datatype: Some(datatype), + is_nullable: _, + }) = anno + { + let real_ty = col.data_type(); + let anno_ty = datatype; + if real_ty != anno_ty { + { + // This`CastOption` allow for overflowly cast and int to float loosely cast etc.., + // check its doc for more information + *col = arrow::compute::cast::cast( + col.as_ref(), + anno_ty, + CastOptions { + wrapped: true, + partial: true, + }, + ) + .context(ArrowSnafu)? 
+ .into(); + } + } + } + } + Ok(()) + } +} + +fn create_located<T>(node: T, loc: Location) -> Located<T> { + Located::new(loc, node) +} + +/// cast a `dyn Array` of type unsigned/int/float into a `dyn Vector` +fn try_into_vector<T: datatypes::types::Primitive + datatypes::types::DataTypeBuilder>( + arg: Arc<dyn Array>, +) -> Result<Arc<dyn Vector>> { + // wrap try_into_vector in here to convert `datatypes::error::Error` to `python::error::Error` + Helper::try_into_vector(arg).context(TypeCastSnafu) +} + +/// convert a `Vec<ArrayRef>` into a `Vec<PyVector>` only when they are of supported types +/// PyVector now only support unsigned&int8/16/32/64, float32/64 and bool when doing meanful arithmetics operation +fn try_into_py_vector(fetch_args: Vec<ArrayRef>) -> Result<Vec<PyVector>> { + let mut args: Vec<PyVector> = Vec::with_capacity(fetch_args.len()); + for (idx, arg) in fetch_args.into_iter().enumerate() { + let v: VectorRef = match arg.data_type() { + DataType::Float32 => try_into_vector::<f32>(arg)?, + DataType::Float64 => try_into_vector::<f64>(arg)?, + DataType::UInt8 => try_into_vector::<u8>(arg)?, + DataType::UInt16 => try_into_vector::<u16>(arg)?, + DataType::UInt32 => try_into_vector::<u32>(arg)?, + DataType::UInt64 => try_into_vector::<u64>(arg)?, + DataType::Int8 => try_into_vector::<i8>(arg)?, + DataType::Int16 => try_into_vector::<i16>(arg)?, + DataType::Int32 => try_into_vector::<i32>(arg)?, + DataType::Int64 => try_into_vector::<i64>(arg)?, + DataType::Boolean => { + let v: VectorRef = + Arc::new(BooleanVector::try_from_arrow_array(arg).context(TypeCastSnafu)?); + v + } + _ => { + return ret_other_error_with(format!( + "Unsupport data type at column {idx}: {:?} for coprocessor", + arg.data_type() + )) + .fail() + } + }; + args.push(PyVector::from(v)); + } + Ok(args) +} + +/// convert a single PyVector or a number(a constant) into a Array(or a constant array) +fn py_vec_to_array_ref(obj: &PyObjectRef, vm: &VirtualMachine, col_len: usize) -> Result<ArrayRef> { + if is_instance::<PyVector>(obj, vm) { + let pyv = obj.payload::<PyVector>().with_context(|| { + ret_other_error_with(format!("can't cast obj {:?} to PyVector", obj)) + })?; + Ok(pyv.to_arrow_array()) + } else if is_instance::<PyInt>(obj, vm) { + let val = obj + .to_owned() + .try_into_value::<i64>(vm) + .map_err(|e| format_py_error(e, vm))?; + + let ret = PrimitiveArray::from_vec(vec![val; col_len]); + Ok(Arc::new(ret) as _) + } else if is_instance::<PyFloat>(obj, vm) { + let val = obj + .to_owned() + .try_into_value::<f64>(vm) + .map_err(|e| format_py_error(e, vm))?; + let ret = PrimitiveArray::from_vec(vec![val; col_len]); + Ok(Arc::new(ret) as _) + } else if is_instance::<PyBool>(obj, vm) { + let val = obj + .to_owned() + .try_into_value::<bool>(vm) + .map_err(|e| format_py_error(e, vm))?; + + let ret = BooleanArray::from_iter(std::iter::repeat(Some(val)).take(5)); + Ok(Arc::new(ret) as _) + } else { + ret_other_error_with(format!("Expect a vector or a constant, found {:?}", obj)).fail() + } +} + +/// convert a tuple of `PyVector` or one `PyVector`(wrapped in a Python Object Ref[`PyObjectRef`]) +/// to a `Vec<ArrayRef>` +fn try_into_columns( + obj: &PyObjectRef, + vm: &VirtualMachine, + col_len: usize, +) -> Result<Vec<ArrayRef>> { + if is_instance::<PyTuple>(obj, vm) { + let tuple = obj.payload::<PyTuple>().with_context(|| { + ret_other_error_with(format!("can't cast obj {:?} to PyTuple)", obj)) + })?; + let cols = tuple + .iter() + .map(|obj| py_vec_to_array_ref(obj, vm, col_len)) + 
.collect::<Result<Vec<ArrayRef>>>()?; + Ok(cols) + } else { + let col = py_vec_to_array_ref(obj, vm, col_len)?; + Ok(vec![col]) + } +} + +/// select columns according to `fetch_names` from `rb` +/// and cast them into a Vec of PyVector +fn select_from_rb(rb: &DfRecordBatch, fetch_names: &[String]) -> Result<Vec<PyVector>> { + let field_map: HashMap<&String, usize> = rb + .schema() + .fields + .iter() + .enumerate() + .map(|(idx, field)| (&field.name, idx)) + .collect(); + let fetch_idx: Vec<usize> = fetch_names + .iter() + .map(|field| { + field_map.get(field).copied().context(OtherSnafu { + reason: format!("Can't found field name {field}"), + }) + }) + .collect::<Result<Vec<usize>>>()?; + let fetch_args: Vec<Arc<dyn Array>> = fetch_idx + .into_iter() + .map(|idx| rb.column(idx).clone()) + .collect(); + try_into_py_vector(fetch_args) +} + +/// match between arguments' real type and annotation types +/// if type anno is vector[_] then use real type +fn check_args_anno_real_type( + args: &[PyVector], + copr: &Coprocessor, + rb: &DfRecordBatch, +) -> Result<()> { + for (idx, arg) in args.iter().enumerate() { + let anno_ty = copr.arg_types[idx].to_owned(); + let real_ty = arg.to_arrow_array().data_type().to_owned(); + let is_nullable: bool = rb.schema().fields[idx].is_nullable; + ensure!( + anno_ty + .to_owned() + .map(|v| v.datatype == None // like a vector[_] + || v.datatype == Some(real_ty.to_owned()) && v.is_nullable == is_nullable) + .unwrap_or(true), + OtherSnafu { + reason: format!( + "column {}'s Type annotation is {:?}, but actual type is {:?}", + copr.deco_args.arg_names[idx], anno_ty, real_ty + ) + } + ) + } + Ok(()) +} + +/// set arguments with given name and values in python scopes +fn set_items_in_scope( + scope: &Scope, + vm: &VirtualMachine, + arg_names: &[String], + args: Vec<PyVector>, +) -> Result<()> { + let _ = arg_names + .iter() + .zip(args) + .map(|(name, vector)| { + scope + .locals + .as_object() + .set_item(name, vm.new_pyobj(vector), vm) + }) + .collect::<StdResult<Vec<()>, PyBaseExceptionRef>>() + .map_err(|e| format_py_error(e, vm))?; + Ok(()) +} + +/// The coprocessor function accept a python script and a Record Batch: +/// ## What it does +/// 1. it take a python script and a [`DfRecordBatch`], extract columns and annotation info according to `args` given in decorator in python script +/// 2. execute python code and return a vector or a tuple of vector, +/// 3. 
the returning vector(s) are assembled into a new [`DfRecordBatch`] according to `returns` in the python decorator and returned to the caller
+///
+/// # Example
+///
+/// ```ignore
+/// use std::sync::Arc;
+/// use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
+/// use arrow::array::PrimitiveArray;
+/// use arrow::datatypes::{DataType, Field, Schema};
+/// use common_function::scalars::python::exec_coprocessor;
+/// let python_source = r#"
+/// @copr(args=["cpu", "mem"], returns=["perf", "what"])
+/// def a(cpu, mem):
+///     return cpu + mem, cpu - mem
+/// "#;
+/// let cpu_array = PrimitiveArray::from_slice([0.9f32, 0.8, 0.7, 0.6]);
+/// let mem_array = PrimitiveArray::from_slice([0.1f64, 0.2, 0.3, 0.4]);
+/// let schema = Arc::new(Schema::from(vec![
+///     Field::new("cpu", DataType::Float32, false),
+///     Field::new("mem", DataType::Float64, false),
+/// ]));
+/// let rb =
+///     DfRecordBatch::try_new(schema, vec![Arc::new(cpu_array), Arc::new(mem_array)]).unwrap();
+/// let ret = exec_coprocessor(python_source, &rb).unwrap();
+/// assert_eq!(ret.column(0).len(), 4);
+/// ```
+///
+/// # Type Annotation
+/// You can use type annotations in args and returns to designate types, so the coprocessor will check for the corresponding types.
+///
+/// Currently supported types are `u8`, `u16`, `u32`, `u64`, `i8`, `i16`, `i32`, `i64` and `f16`, `f32`, `f64`.
+///
+/// Use `f64 | None` to mark a returning column as nullable, like the is_nullable flag of a [`Field`] in a [`DfRecordBatch`]'s schema.
+///
+/// You can also use a single underscore `_` to let the coprocessor infer the type, so `_` and `_ | None` are both valid in type annotations.
+/// Note: `_` means a non-nullable column, while `_ | None` means a nullable column.
+///
+/// An example (of a python script) is given below:
+/// ```python
+/// @copr(args=["cpu", "mem"], returns=["perf", "minus", "mul", "div"])
+/// def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], vector[f64], vector[_], vector[_ | None]):
+///     return cpu + mem, cpu - mem, cpu * mem, cpu / mem
+/// ```
+///
+/// # Return Constant columns
+/// You can return constants in python code, like `return 1, 1.0, True`,
+/// which creates constant arrays (same value in every row; currently int, float and bool are supported) as the returned columns.
+#[cfg(test)]
+pub fn exec_coprocessor(script: &str, rb: &DfRecordBatch) -> Result<RecordBatch> {
+    // 1. parse the script and check that it is only one function with the `@coprocessor` decorator, and get `args` and `returns`,
+    // 2. also check for the existence of `args` in `rb`; if not found, return an error
+    // TODO(discord9): cache the result of parse_copr
+    let copr = parse::parse_copr(script)?;
+    exec_parsed(&copr, rb)
+}
+
+pub(crate) fn exec_with_cached_vm(
+    copr: &Coprocessor,
+    rb: &DfRecordBatch,
+    args: Vec<PyVector>,
+    vm: &Interpreter,
+) -> Result<RecordBatch> {
+    vm.enter(|vm| -> Result<RecordBatch> {
+        PyVector::make_class(&vm.ctx);
+        // set arguments with given name and values
+        let scope = vm.new_scope_with_builtins();
+        set_items_in_scope(&scope, vm, &copr.deco_args.arg_names, args)?;
+
+        let code_obj = copr.strip_append_and_compile()?;
+        let code_obj = vm.ctx.new_code(code_obj);
+        let ret = vm
+            .run_code_obj(code_obj, scope)
+            .map_err(|e| format_py_error(e, vm))?;
+
+        // 5. 
get returns as either a PyVector or a PyTuple, and naming schema them according to `returns` + let col_len = rb.num_rows(); + let mut cols: Vec<ArrayRef> = try_into_columns(&ret, vm, col_len)?; + ensure!( + cols.len() == copr.deco_args.ret_names.len(), + OtherSnafu { + reason: format!( + "The number of return Vector is wrong, expect {}, found {}", + copr.deco_args.ret_names.len(), + cols.len() + ) + } + ); + + // if cols and schema's data types is not match, try coerce it to given type(if annotated)(if error occur, return relevant error with question mark) + copr.check_and_cast_type(&mut cols)?; + // 6. return a assembled DfRecordBatch + let schema = copr.gen_schema(&cols)?; + let res_rb = DfRecordBatch::try_new(schema.clone(), cols).context(ArrowSnafu)?; + Ok(RecordBatch { + schema: Arc::new(Schema::try_from(schema).context(TypeCastSnafu)?), + df_recordbatch: res_rb, + }) + }) +} + +/// init interpreter with type PyVector and Module: greptime +pub(crate) fn init_interpreter() -> Interpreter { + vm::Interpreter::with_init(Default::default(), |vm| { + PyVector::make_class(&vm.ctx); + vm.add_native_module("greptime", Box::new(greptime_builtin::make_module)); + }) +} + +/// using a parsed `Coprocessor` struct as input to execute python code +pub(crate) fn exec_parsed(copr: &Coprocessor, rb: &DfRecordBatch) -> Result<RecordBatch> { + // 3. get args from `rb`, and cast them into PyVector + let args: Vec<PyVector> = select_from_rb(rb, &copr.deco_args.arg_names)?; + check_args_anno_real_type(&args, copr, rb)?; + let interpreter = init_interpreter(); + // 4. then set args in scope and compile then run `CodeObject` which already append a new `Call` node + exec_with_cached_vm(copr, rb, args, &interpreter) +} + +/// execute script just like [`exec_coprocessor`] do, +/// but instead of return a internal [`Error`] type, +/// return a friendly String format of error +/// +/// use `ln_offset` and `filename` to offset line number and mark file name in error prompt +#[cfg(test)] +#[allow(dead_code)] +pub fn exec_copr_print( + script: &str, + rb: &DfRecordBatch, + ln_offset: usize, + filename: &str, +) -> StdResult<RecordBatch, String> { + let res = exec_coprocessor(script, rb); + res.map_err(|e| { + crate::python::error::pretty_print_error_in_src(script, &e, ln_offset, filename) + }) +} diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs new file mode 100644 index 000000000000..7dea6ae072ef --- /dev/null +++ b/src/script/src/python/coprocessor/parse.rs @@ -0,0 +1,521 @@ +use std::collections::HashSet; + +use datatypes::arrow::datatypes::DataType; +use rustpython_parser::{ + ast, + ast::{Arguments, Location}, + parser, +}; +#[cfg(test)] +use serde::Deserialize; +use snafu::ResultExt; + +use crate::python::coprocessor::AnnotationInfo; +use crate::python::coprocessor::Coprocessor; +use crate::python::error::{ensure, CoprParseSnafu, PyParseSnafu, Result}; + +#[cfg_attr(test, derive(Deserialize))] +#[derive(Default, Debug, Clone, PartialEq, Eq)] +pub struct DecoratorArgs { + pub arg_names: Vec<String>, + pub ret_names: Vec<String>, + pub sql: Option<String>, + // maybe add a URL for connecting or what? + // also predicate for timed triggered or conditional triggered? 
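+    // For reference (illustrative rendering only): the decorator used by the engine.rs test,
+    //   @copr(args=["a", "b", "c"], returns = ["r"], sql="select number as a,number as b,number as c from numbers limit 100")
+    // is parsed by `parse_decorator`/`parse_keywords` below into roughly
+    //   DecoratorArgs { arg_names: ["a", "b", "c"], ret_names: ["r"], sql: Some("select number as a,number as b,number as c from numbers limit 100") }
+    // with `sql` left as `None` when that keyword is omitted.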
+} + +/// Return a CoprParseSnafu for you to chain fail() to return correct err Result type +pub(crate) fn ret_parse_error( + reason: String, + loc: Option<Location>, +) -> CoprParseSnafu<String, Option<Location>> { + CoprParseSnafu { reason, loc } +} + +/// append a `.fail()` after `ret_parse_error`, so compiler can return a Err(this error) +#[macro_export] +macro_rules! fail_parse_error { + ($reason:expr, $loc:expr $(,)*) => { + ret_parse_error($reason, $loc).fail() + }; +} + +fn py_str_to_string(s: &ast::Expr<()>) -> Result<String> { + if let ast::ExprKind::Constant { + value: ast::Constant::Str(v), + kind: _, + } = &s.node + { + Ok(v.to_owned()) + } else { + fail_parse_error!( + format!( + "Expect a list of String, found one element to be: \n{:#?}", + &s.node + ), + Some(s.location) + ) + } +} + +/// turn a python list of string in ast form(a `ast::Expr`) of string into a `Vec<String>` +fn pylist_to_vec(lst: &ast::Expr<()>) -> Result<Vec<String>> { + if let ast::ExprKind::List { elts, ctx: _ } = &lst.node { + let ret = elts.iter().map(py_str_to_string).collect::<Result<_>>()?; + Ok(ret) + } else { + fail_parse_error!( + format!("Expect a list, found \n{:#?}", &lst.node), + Some(lst.location) + ) + } +} + +fn try_into_datatype(ty: &str, loc: &Location) -> Result<Option<DataType>> { + match ty { + "bool" => Ok(Some(DataType::Boolean)), + "u8" => Ok(Some(DataType::UInt8)), + "u16" => Ok(Some(DataType::UInt16)), + "u32" => Ok(Some(DataType::UInt32)), + "u64" => Ok(Some(DataType::UInt64)), + "i8" => Ok(Some(DataType::Int8)), + "i16" => Ok(Some(DataType::Int16)), + "i32" => Ok(Some(DataType::Int32)), + "i64" => Ok(Some(DataType::Int64)), + "f16" => Ok(Some(DataType::Float16)), + "f32" => Ok(Some(DataType::Float32)), + "f64" => Ok(Some(DataType::Float64)), + // for any datatype + "_" => Ok(None), + // note the different between "_" and _ + _ => fail_parse_error!( + format!("Unknown datatype: {ty} at {}", loc), + Some(loc.to_owned()) + ), + } +} + +/// Item => NativeType +/// default to be not nullable +fn parse_native_type(sub: &ast::Expr<()>) -> Result<AnnotationInfo> { + match &sub.node { + ast::ExprKind::Name { id, .. } => Ok(AnnotationInfo { + datatype: try_into_datatype(id, &sub.location)?, + is_nullable: false, + }), + _ => fail_parse_error!( + format!("Expect types' name, found \n{:#?}", &sub.node), + Some(sub.location) + ), + } +} + +/// check if binary op expr is legal(with one typename and one `None`) +fn check_bin_op(bin_op: &ast::Expr<()>) -> Result<()> { + if let ast::ExprKind::BinOp { left, op: _, right } = &bin_op.node { + // 1. 
first check if this BinOp is legal(Have one typename and(optional) a None) + let is_none = |node: &ast::Expr<()>| -> bool { + matches!( + &node.node, + ast::ExprKind::Constant { + value: ast::Constant::None, + kind: _, + } + ) + }; + let is_type = |node: &ast::Expr<()>| { + if let ast::ExprKind::Name { id, ctx: _ } = &node.node { + try_into_datatype(id, &node.location).is_ok() + } else { + false + } + }; + let left_is_ty = is_type(left); + let left_is_none = is_none(left); + let right_is_ty = is_type(right); + let right_is_none = is_none(right); + if left_is_ty && right_is_ty || left_is_none && right_is_none { + fail_parse_error!( + "Expect one typenames and one `None`".to_string(), + Some(bin_op.location) + )?; + } else if !(left_is_none && right_is_ty || left_is_ty && right_is_none) { + fail_parse_error!( + format!( + "Expect a type name and a `None`, found left: \n{:#?} \nand right: \n{:#?}", + &left.node, &right.node + ), + Some(bin_op.location) + )?; + } + Ok(()) + } else { + fail_parse_error!( + format!( + "Expect binary ops like `DataType | None`, found \n{:#?}", + bin_op.node + ), + Some(bin_op.location) + ) + } +} + +/// parse a `DataType | None` or a single `DataType` +fn parse_bin_op(bin_op: &ast::Expr<()>) -> Result<AnnotationInfo> { + // 1. first check if this BinOp is legal(Have one typename and(optional) a None) + check_bin_op(bin_op)?; + if let ast::ExprKind::BinOp { left, op: _, right } = &bin_op.node { + // then get types from this BinOp + let left_ty = parse_native_type(left).ok(); + let right_ty = parse_native_type(right).ok(); + let mut ty_anno = if let Some(left_ty) = left_ty { + left_ty + } else if let Some(right_ty) = right_ty { + right_ty + } else { + // deal with errors anyway in case code above changed but forget to modify + return fail_parse_error!( + "Expect a type name, not two `None`".into(), + Some(bin_op.location), + ); + }; + // because check_bin_op assure a `None` exist + ty_anno.is_nullable = true; + return Ok(ty_anno); + } + unreachable!() +} + +/// check for the grammar correctness of annotation, also return the slice of subscript for further parsing +fn check_annotation_ret_slice(sub: &ast::Expr<()>) -> Result<&ast::Expr<()>> { + // TODO(discord9): allow a single annotation like `vector` + if let ast::ExprKind::Subscript { + value, + slice, + ctx: _, + } = &sub.node + { + if let ast::ExprKind::Name { id, ctx: _ } = &value.node { + ensure!( + id == "vector", + ret_parse_error( + format!( + "Wrong type annotation, expect `vector[...]`, found `{}`", + id + ), + Some(value.location) + ) + ); + } else { + return fail_parse_error!( + format!("Expect \"vector\", found \n{:#?}", &value.node), + Some(value.location) + ); + } + Ok(slice) + } else { + fail_parse_error!( + format!("Expect type annotation, found \n{:#?}", &sub), + Some(sub.location) + ) + } +} + +/// where: +/// +/// Start => vector`[`TYPE`]` +/// +/// TYPE => Item | Item `|` None +/// +/// Item => NativeType +fn parse_annotation(sub: &ast::Expr<()>) -> Result<AnnotationInfo> { + let slice = check_annotation_ret_slice(sub)?; + + { + // i.e: vector[f64] + match &slice.node { + ast::ExprKind::Name { .. 
} => parse_native_type(slice), + ast::ExprKind::BinOp { + left: _, + op: _, + right: _, + } => parse_bin_op(slice), + _ => { + fail_parse_error!( + format!("Expect type in `vector[...]`, found \n{:#?}", &slice.node), + Some(slice.location), + ) + } + } + } +} + +/// parse a list of keyword and return args and returns list from keywords +fn parse_keywords(keywords: &Vec<ast::Keyword<()>>) -> Result<DecoratorArgs> { + // more keys maybe add to this list of `avail_key`(like `sql` for querying and maybe config for connecting to database?), for better extension using a `HashSet` in here + let avail_key = HashSet::from(["args", "returns", "sql"]); + let opt_keys = HashSet::from(["sql"]); + let mut visited_key = HashSet::new(); + let len_min = avail_key.len() - opt_keys.len(); + let len_max = avail_key.len(); + ensure!( + // "sql" is optional(for now) + keywords.len() >= len_min && keywords.len() <= len_max, + CoprParseSnafu { + reason: format!( + "Expect between {len_min} and {len_max} keyword argument, found {}.", + keywords.len() + ), + loc: keywords.get(0).map(|s| s.location) + } + ); + let mut ret_args = DecoratorArgs::default(); + for kw in keywords { + match &kw.node.arg { + Some(s) => { + let s = s.as_str(); + if visited_key.contains(s) { + return fail_parse_error!( + format!("`{s}` occur multiple times in decorator's arguements' list."), + Some(kw.location), + ); + } + if !avail_key.contains(s) { + return fail_parse_error!( + format!("Expect one of {:?}, found `{}`", &avail_key, s), + Some(kw.location), + ); + } else { + visited_key.insert(s); + } + match s { + "args" => ret_args.arg_names = pylist_to_vec(&kw.node.value)?, + "returns" => ret_args.ret_names = pylist_to_vec(&kw.node.value)?, + "sql" => ret_args.sql = Some(py_str_to_string(&kw.node.value)?), + _ => unreachable!(), + } + } + None => { + return fail_parse_error!( + format!( + "Expect explictly set both `args` and `returns`, found \n{:#?}", + &kw.node + ), + Some(kw.location), + ) + } + } + } + let loc = keywords[0].location; + for key in avail_key { + if !visited_key.contains(key) && !opt_keys.contains(key) { + return fail_parse_error!(format!("Expect `{key}` keyword"), Some(loc)); + } + } + Ok(ret_args) +} + +/// returns args and returns in Vec of String +fn parse_decorator(decorator: &ast::Expr<()>) -> Result<DecoratorArgs> { + //check_decorator(decorator)?; + if let ast::ExprKind::Call { + func, + args: _, + keywords, + } = &decorator.node + { + ensure!( + func.node + == ast::ExprKind::Name { + id: "copr".to_string(), + ctx: ast::ExprContext::Load + } + || func.node + == ast::ExprKind::Name { + id: "coprocessor".to_string(), + ctx: ast::ExprContext::Load + }, + CoprParseSnafu { + reason: format!( + "Expect decorator with name `copr` or `coprocessor`, found \n{:#?}", + &func.node + ), + loc: Some(func.location) + } + ); + parse_keywords(keywords) + } else { + fail_parse_error!( + format!( + "Expect decorator to be a function call(like `@copr(...)`), found \n{:#?}", + decorator.node + ), + Some(decorator.location), + ) + } +} + +// get type annotaion in arguments +fn get_arg_annotations(args: &Arguments) -> Result<Vec<Option<AnnotationInfo>>> { + // get arg types from type annotation> + args.args + .iter() + .map(|arg| { + if let Some(anno) = &arg.node.annotation { + // for there is erro handling for parse_annotation + parse_annotation(anno).map(Some) + } else { + Ok(None) + } + }) + .collect::<Result<Vec<Option<_>>>>() +} + +fn get_return_annotations(rets: &ast::Expr<()>) -> Result<Vec<Option<AnnotationInfo>>> { + let mut 
return_types = Vec::with_capacity(match &rets.node { + ast::ExprKind::Tuple { elts, ctx: _ } => elts.len(), + ast::ExprKind::Subscript { + value: _, + slice: _, + ctx: _, + } => 1, + _ => { + return fail_parse_error!( + format!( + "Expect `(vector[...], vector[...], ...)` or `vector[...]`, found \n{:#?}", + &rets.node + ), + Some(rets.location), + ) + } + }); + match &rets.node { + // python: ->(vector[...], vector[...], ...) + ast::ExprKind::Tuple { elts, .. } => { + for elem in elts { + return_types.push(Some(parse_annotation(elem)?)) + } + } + // python: -> vector[...] + ast::ExprKind::Subscript { + value: _, + slice: _, + ctx: _, + } => return_types.push(Some(parse_annotation(rets)?)), + _ => { + return fail_parse_error!( + format!( + "Expect one or many type annotation for the return type, found \n{:#?}", + &rets.node + ), + Some(rets.location), + ) + } + } + Ok(return_types) +} + +/// check if the list of statements contain only one statement and +/// that statement is a function call with one decorator +fn check_copr(stmts: &Vec<ast::Stmt<()>>) -> Result<()> { + ensure!( + stmts.len() == 1, + CoprParseSnafu { + reason: + "Expect one and only one python function with `@coprocessor` or `@cpor` decorator" + .to_string(), + loc: stmts.first().map(|s| s.location) + } + ); + if let ast::StmtKind::FunctionDef { + name: _, + args: _, + body: _, + decorator_list, + returns: _, + type_comment: _, + } = &stmts[0].node + { + ensure!( + decorator_list.len() == 1, + CoprParseSnafu { + reason: "Expect one decorator", + loc: decorator_list.first().map(|s| s.location) + } + ); + } else { + return fail_parse_error!( + format!( + "Expect a function definition, found a \n{:#?}", + &stmts[0].node + ), + Some(stmts[0].location), + ); + } + Ok(()) +} + +/// parse script and return `Coprocessor` struct with info extract from ast +pub fn parse_copr(script: &str) -> Result<Coprocessor> { + let python_ast = parser::parse_program(script).context(PyParseSnafu)?; + check_copr(&python_ast)?; + if let ast::StmtKind::FunctionDef { + name, + args: fn_args, + body: _, + decorator_list, + returns, + type_comment: _, + } = &python_ast[0].node + { + let decorator = &decorator_list[0]; + let deco_args = parse_decorator(decorator)?; + + // get arg types from type annotation + let arg_types = get_arg_annotations(fn_args)?; + + // get return types from type annotation + let return_types = if let Some(rets) = returns { + get_return_annotations(rets)? + } else { + // if no anntation at all, set it to all None + std::iter::repeat(None) + .take(deco_args.ret_names.len()) + .collect() + }; + + // make sure both arguments&returns in fucntion + // and in decorator have same length + ensure!( + deco_args.arg_names.len() == arg_types.len(), + CoprParseSnafu { + reason: format!( + "args number in decorator({}) and function({}) doesn't match", + deco_args.arg_names.len(), + arg_types.len() + ), + loc: None + } + ); + ensure!( + deco_args.ret_names.len() == return_types.len(), + CoprParseSnafu { + reason: format!( + "returns number in decorator( {} ) and function annotation( {} ) doesn't match", + deco_args.ret_names.len(), + return_types.len() + ), + loc: None + } + ); + Ok(Coprocessor { + name: name.to_string(), + deco_args, + arg_types, + return_types, + script: script.to_owned(), + }) + } else { + unreachable!() + } +} diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs new file mode 100644 index 000000000000..6bf74b02bf42 --- /dev/null +++ b/src/script/src/python/engine.rs @@ -0,0 +1,230 @@ +//! 
Python script engine +use std::any::Any; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use async_trait::async_trait; +use common_error::prelude::BoxedError; +use common_recordbatch::{ + error::ExternalSnafu, error::Result as RecordBatchResult, RecordBatch, RecordBatchStream, + SendableRecordBatchStream, +}; +use datatypes::schema::SchemaRef; +use futures::Stream; +use query::Output; +use query::QueryEngineRef; +use snafu::{ensure, ResultExt}; +use sql::statements::statement::Statement; + +use crate::engine::{CompileContext, EvalContext, Script, ScriptEngine}; +use crate::python::coprocessor::{exec_parsed, parse::parse_copr}; +use crate::python::{ + coprocessor::CoprocessorRef, + error::{self, Result}, +}; + +const PY_ENGINE: &str = "python"; + +pub struct PyScript { + query_engine: QueryEngineRef, + copr: CoprocessorRef, +} + +pub struct CoprStream { + stream: SendableRecordBatchStream, + copr: CoprocessorRef, +} + +impl RecordBatchStream for CoprStream { + fn schema(&self) -> SchemaRef { + self.stream.schema() + } +} + +impl Stream for CoprStream { + type Item = RecordBatchResult<RecordBatch>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + match Pin::new(&mut self.stream).poll_next(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(Some(Ok(recordbatch))) => { + let batch = exec_parsed(&self.copr, &recordbatch.df_recordbatch) + .map_err(BoxedError::new) + .context(ExternalSnafu)?; + + Poll::Ready(Some(Ok(batch))) + } + Poll::Ready(other) => Poll::Ready(other), + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.stream.size_hint() + } +} + +#[async_trait] +impl Script for PyScript { + type Error = error::Error; + + fn engine_name(&self) -> &str { + PY_ENGINE + } + + fn as_any(&self) -> &dyn Any { + self + } + + async fn evaluate(&self, _ctx: EvalContext) -> Result<Output> { + if let Some(sql) = &self.copr.deco_args.sql { + let stmt = self.query_engine.sql_to_statement(sql)?; + ensure!( + matches!(stmt, Statement::Query { .. 
}), + error::UnsupportedSqlSnafu { sql } + ); + let plan = self.query_engine.statement_to_plan(stmt)?; + let res = self.query_engine.execute(&plan).await?; + let copr = self.copr.clone(); + match res { + query::Output::RecordBatch(stream) => { + Ok(Output::RecordBatch(Box::pin(CoprStream { copr, stream }))) + } + _ => unreachable!(), + } + } else { + // TODO(boyan): try to retrieve sql from user request + error::MissingSqlSnafu {}.fail() + } + } +} + +pub struct PyEngine { + query_engine: QueryEngineRef, +} + +impl PyEngine { + pub fn new(query_engine: QueryEngineRef) -> Self { + Self { query_engine } + } +} + +#[async_trait] +impl ScriptEngine for PyEngine { + type Error = error::Error; + type Script = PyScript; + + fn name(&self) -> &str { + PY_ENGINE + } + + fn as_any(&self) -> &dyn Any { + self + } + + async fn compile(&self, script: &str, _ctx: CompileContext) -> Result<PyScript> { + let copr = Arc::new(parse_copr(script)?); + + Ok(PyScript { + copr, + query_engine: self.query_engine.clone(), + }) + } +} + +#[cfg(test)] +mod tests { + use catalog::memory::{MemoryCatalogProvider, MemorySchemaProvider}; + use catalog::{ + CatalogList, CatalogProvider, SchemaProvider, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, + }; + use common_recordbatch::util; + use datafusion_common::field_util::FieldExt; + use datafusion_common::field_util::SchemaExt; + use datatypes::arrow::array::Float64Array; + use datatypes::arrow::array::Int64Array; + use query::QueryEngineFactory; + use table::table::numbers::NumbersTable; + + use super::*; + + #[tokio::test] + async fn test_compile_evaluate() { + let catalog_list = catalog::memory::new_memory_catalog_list().unwrap(); + + let default_schema = Arc::new(MemorySchemaProvider::new()); + default_schema + .register_table("numbers".to_string(), Arc::new(NumbersTable::default())) + .unwrap(); + let default_catalog = Arc::new(MemoryCatalogProvider::new()); + default_catalog.register_schema(DEFAULT_SCHEMA_NAME.to_string(), default_schema); + catalog_list.register_catalog(DEFAULT_CATALOG_NAME.to_string(), default_catalog); + + let factory = QueryEngineFactory::new(catalog_list); + let query_engine = factory.query_engine(); + + let script_engine = PyEngine::new(query_engine.clone()); + + let script = r#" +@copr(args=["a", "b", "c"], returns = ["r"], sql="select number as a,number as b,number as c from numbers limit 100") +def test(a, b, c): + import greptime as g + return (a + b) / g.sqrt(c) +"#; + let script = script_engine + .compile(script, CompileContext::default()) + .await + .unwrap(); + let output = script.evaluate(EvalContext::default()).await.unwrap(); + match output { + Output::RecordBatch(stream) => { + let numbers = util::collect(stream).await.unwrap(); + + assert_eq!(1, numbers.len()); + let number = &numbers[0]; + assert_eq!(number.df_recordbatch.num_columns(), 1); + assert_eq!("r", number.schema.arrow_schema().field(0).name()); + + let columns = number.df_recordbatch.columns(); + assert_eq!(1, columns.len()); + assert_eq!(100, columns[0].len()); + let rows = columns[0].as_any().downcast_ref::<Float64Array>().unwrap(); + assert!(rows.value(0).is_nan()); + assert_eq!((99f64 + 99f64) / 99f64.sqrt(), rows.value(99)) + } + _ => unreachable!(), + } + + // test list comprehension + let script = r#" +@copr(args=["number"], returns = ["r"], sql="select number from numbers limit 100") +def test(a): + import greptime as gt + return gt.vector([x for x in a if x % 2 == 0]) +"#; + let script = script_engine + .compile(script, CompileContext::default()) + .await + 
.unwrap(); + let output = script.evaluate(EvalContext::default()).await.unwrap(); + match output { + Output::RecordBatch(stream) => { + let numbers = util::collect(stream).await.unwrap(); + + assert_eq!(1, numbers.len()); + let number = &numbers[0]; + assert_eq!(number.df_recordbatch.num_columns(), 1); + assert_eq!("r", number.schema.arrow_schema().field(0).name()); + + let columns = number.df_recordbatch.columns(); + assert_eq!(1, columns.len()); + assert_eq!(50, columns[0].len()); + let rows = columns[0].as_any().downcast_ref::<Int64Array>().unwrap(); + assert_eq!(0, rows.value(0)); + assert_eq!(98, rows.value(49)) + } + _ => unreachable!(), + } + } +} diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs new file mode 100644 index 000000000000..c06d20a9b475 --- /dev/null +++ b/src/script/src/python/error.rs @@ -0,0 +1,189 @@ +use common_error::prelude::{ErrorCompat, ErrorExt, StatusCode}; +use console::{style, Style}; +use datatypes::arrow::error::ArrowError; +use datatypes::error::Error as DataTypeError; +use query::error::Error as QueryError; +use rustpython_compiler_core::error::CompileError as CoreCompileError; +use rustpython_parser::{ast::Location, error::ParseError}; +pub use snafu::ensure; +use snafu::{prelude::Snafu, Backtrace}; +pub type Result<T> = std::result::Result<T, Error>; + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(crate)))] +pub enum Error { + #[snafu(display("Datatype error: {}", source))] + TypeCast { + #[snafu(backtrace)] + source: DataTypeError, + }, + + #[snafu(display("Failed to query, source: {}", source))] + DatabaseQuery { + #[snafu(backtrace)] + source: QueryError, + }, + + #[snafu(display("Failed to parse script, source: {}", source))] + PyParse { + backtrace: Backtrace, + source: ParseError, + }, + + #[snafu(display("Failed to compile script, source: {}", source))] + PyCompile { + backtrace: Backtrace, + source: CoreCompileError, + }, + + /// rustpython problem, using python virtual machines' backtrace instead + #[snafu(display("Python Runtime error, error: {}", msg))] + PyRuntime { msg: String, backtrace: Backtrace }, + + #[snafu(display("Arrow error: {}", source))] + Arrow { + backtrace: Backtrace, + source: ArrowError, + }, + + /// errors in coprocessors' parse check for types and etc. + #[snafu(display("Coprocessor error: {} {}.", reason, + if let Some(loc) = loc{ + format!("at {loc}") + }else{ + "".into() + }))] + CoprParse { + backtrace: Backtrace, + reason: String, + // location is option because maybe errors can't give a clear location? + loc: Option<Location>, + }, + + /// Other types of error that isn't any of above + #[snafu(display("Coprocessor's Internal error: {}", reason))] + Other { + backtrace: Backtrace, + reason: String, + }, + + #[snafu(display("Unsupported sql in coprocessor: {}", sql))] + UnsupportedSql { sql: String, backtrace: Backtrace }, + + #[snafu(display("Missing sql in coprocessor"))] + MissingSql { backtrace: Backtrace }, + + #[snafu(display("Failed to retrieve record batches, source: {}", source))] + RecordBatch { + #[snafu(backtrace)] + source: common_recordbatch::error::Error, + }, +} + +impl From<QueryError> for Error { + fn from(source: QueryError) -> Self { + Self::DatabaseQuery { source } + } +} + +impl ErrorExt for Error { + fn status_code(&self) -> StatusCode { + match self { + Error::Arrow { .. } + | Error::TypeCast { .. } + | Error::DatabaseQuery { .. } + | Error::PyRuntime { .. } + | Error::RecordBatch { .. } + | Error::Other { .. } => StatusCode::Internal, + + Error::PyParse { .. 
} + | Error::PyCompile { .. } + | Error::CoprParse { .. } + | Error::UnsupportedSql { .. } + | Error::MissingSql { .. } => StatusCode::InvalidArguments, + } + } + fn backtrace_opt(&self) -> Option<&common_error::snafu::Backtrace> { + ErrorCompat::backtrace(self) + } + + fn as_any(&self) -> &dyn std::any::Any { + self + } +} +// impl from for those error so one can use question mark and implictly cast into `CoprError` +impl From<DataTypeError> for Error { + fn from(e: DataTypeError) -> Self { + Self::TypeCast { source: e } + } +} + +/// pretty print [`Error`] in given script, +/// basically print a arrow which point to where error occurs(if possible to get a location) +pub fn pretty_print_error_in_src( + script: &str, + err: &Error, + ln_offset: usize, + filename: &str, +) -> String { + let (reason, loc) = get_error_reason_loc(err); + if let Some(loc) = loc { + visualize_loc(script, &loc, &err.to_string(), &reason, ln_offset, filename) + } else { + // No location provide + format!("\n{}: {}", style("error").red().bold(), err) + } +} + +/// pretty print a location in script with desc. +/// +/// `ln_offset` is line offset number that added to `loc`'s `row`, `filename` is the file's name display with it's row and columns info. +pub fn visualize_loc( + script: &str, + loc: &Location, + err_ty: &str, + desc: &str, + ln_offset: usize, + filename: &str, +) -> String { + let lines: Vec<&str> = script.split('\n').collect(); + let (row, col) = (loc.row(), loc.column()); + let red_bold = Style::new().red().bold(); + let blue_bold = Style::new().blue().bold(); + let col_space = (ln_offset + row).to_string().len().max(1); + let space: String = " ".repeat(col_space - 1); + let indicate = format!( + " +{error}: {err_ty} +{space}{r_arrow}{filename}:{row}:{col} +{prow:col_space$}{ln_pad} {line} +{space} {ln_pad} {arrow:>pad$} {desc} +", + error = red_bold.apply_to("error"), + err_ty = style(err_ty).bold(), + r_arrow = blue_bold.apply_to("-->"), + filename = filename, + row = ln_offset + row, + col = col, + line = lines[loc.row() - 1], + pad = loc.column(), + arrow = red_bold.apply_to("^"), + desc = red_bold.apply_to(desc), + ln_pad = blue_bold.apply_to("|"), + prow = blue_bold.apply_to(ln_offset + row), + space = space + ); + indicate +} + +/// extract a reason for [`Error`] in string format, also return a location if possible +pub fn get_error_reason_loc(err: &Error) -> (String, Option<Location>) { + match err { + Error::CoprParse { reason, loc, .. } => (reason.clone(), loc.to_owned()), + Error::Other { reason, .. } => (reason.clone(), None), + Error::PyRuntime { msg, .. } => (msg.clone(), None), + Error::PyParse { source, .. } => (source.error.to_string(), Some(source.location)), + Error::PyCompile { source, .. } => (source.error.to_string(), Some(source.location)), + _ => (format!("Unknown error: {:?}", err), None), + } +} diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs new file mode 100644 index 000000000000..0e3456a3b349 --- /dev/null +++ b/src/script/src/python/test.rs @@ -0,0 +1,321 @@ +#![allow(clippy::print_stdout, clippy::print_stderr)] +// for debug purpose, also this is already a +// test module so allow print_stdout shouldn't be a problem? 
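+
+// A minimal sketch (the helper name and wiring are illustrative, not part of the test
+// suite below) showing how `parse_copr` fits together with the error pretty-printer from
+// error.rs; the "42" script is the same deliberately invalid input as the "not_even_wrong"
+// case in testcases.ron.
+#[allow(dead_code)]
+fn demo_pretty_print_parse_error() {
+    let script = "42";
+    if let Err(err) = crate::python::coprocessor::parse::parse_copr(script) {
+        // Renders the "error: ... --> <embedded>:row:col" style report built by `visualize_loc`.
+        println!(
+            "{}",
+            crate::python::error::pretty_print_error_in_src(script, &err, 0, "<embedded>")
+        );
+    }
+}
+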
+use std::fs::File; +use std::io::prelude::*; +use std::path::Path; +use std::sync::Arc; + +use console::style; +use datafusion_common::record_batch::RecordBatch as DfRecordBatch; +use datatypes::arrow::array::PrimitiveArray; +use datatypes::arrow::datatypes::{DataType, Field, Schema}; +use ron::from_str as from_ron_string; +use rustpython_parser::parser; +use serde::{Deserialize, Serialize}; + +use super::error::{get_error_reason_loc, visualize_loc}; +use crate::python::coprocessor::AnnotationInfo; +use crate::python::error::pretty_print_error_in_src; +use crate::python::{ + coprocessor, coprocessor::parse::parse_copr, coprocessor::Coprocessor, error::Error, +}; + +#[derive(Deserialize, Debug)] +struct TestCase { + name: String, + code: String, + predicate: Predicate, +} + +#[derive(Deserialize, Debug)] +enum Predicate { + ParseIsOk { + result: Coprocessor, + }, + ParseIsErr { + /// used to check if after serialize [`Error`] into a String, that string contains `reason` + reason: String, + }, + ExecIsOk { + fields: Vec<AnnotationInfo>, + columns: Vec<ColumnInfo>, + }, + ExecIsErr { + /// used to check if after serialize [`Error`] into a String, that string contains `reason` + reason: String, + }, +} + +#[derive(Serialize, Deserialize, Debug)] +struct ColumnInfo { + pub ty: DataType, + pub len: usize, +} + +fn create_sample_recordbatch() -> DfRecordBatch { + let cpu_array = PrimitiveArray::from_slice([0.9f32, 0.8, 0.7, 0.6]); + let mem_array = PrimitiveArray::from_slice([0.1f64, 0.2, 0.3, 0.4]); + let schema = Arc::new(Schema::from(vec![ + Field::new("cpu", DataType::Float32, false), + Field::new("mem", DataType::Float64, false), + ])); + + DfRecordBatch::try_new(schema, vec![Arc::new(cpu_array), Arc::new(mem_array)]).unwrap() +} + +/// test cases which read from a .ron file, deser, +/// +/// and exec/parse (depending on the type of predicate) then decide if result is as expected +#[test] +fn run_ron_testcases() { + let loc = Path::new("src/python/testcases.ron"); + let loc = loc.to_str().expect("Fail to parse path"); + let mut file = File::open(loc).expect("Fail to open file"); + let mut buf = String::new(); + file.read_to_string(&mut buf) + .expect("Fail to read to string"); + let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases"); + println!("Read {} testcases from {}", testcases.len(), loc); + for testcase in testcases { + print!(".ron test {}", testcase.name); + match testcase.predicate { + Predicate::ParseIsOk { result } => { + let copr = parse_copr(&testcase.code); + let mut copr = copr.unwrap(); + copr.script = "".into(); + assert_eq!(copr, result); + } + Predicate::ParseIsErr { reason } => { + let copr = parse_copr(&testcase.code); + if copr.is_ok() { + eprintln!("Expect to be err, found{copr:#?}"); + panic!() + } + let res = &copr.unwrap_err(); + println!( + "{}", + pretty_print_error_in_src(&testcase.code, res, 0, "<embedded>") + ); + let (res, _) = get_error_reason_loc(res); + if !res.contains(&reason) { + eprintln!("{}", testcase.code); + eprintln!("Parse Error, expect \"{reason}\" in \"{res}\", but not found."); + panic!() + } + } + Predicate::ExecIsOk { fields, columns } => { + let rb = create_sample_recordbatch(); + let res = coprocessor::exec_coprocessor(&testcase.code, &rb); + if res.is_err() { + dbg!(&res); + } + assert!(res.is_ok()); + let res = res.unwrap(); + fields + .iter() + .zip(&res.schema.arrow_schema().fields) + .map(|(anno, real)| { + if !(anno.datatype.clone().unwrap() == real.data_type + && anno.is_nullable == 
real.is_nullable) + { + eprintln!("fields expect to be {anno:#?}, found to be {real:#?}."); + panic!() + } + }) + .count(); + columns + .iter() + .zip(res.df_recordbatch.columns()) + .map(|(anno, real)| { + if !(&anno.ty == real.data_type() && anno.len == real.len()) { + eprintln!( + "Unmatch type or length!Expect [{:#?}; {}], found [{:#?}; {}]", + anno.ty, + anno.len, + real.data_type(), + real.len() + ); + panic!() + } + }) + .count(); + } + Predicate::ExecIsErr { + reason: part_reason, + } => { + let rb = create_sample_recordbatch(); + let res = coprocessor::exec_coprocessor(&testcase.code, &rb); + if let Err(res) = res { + println!( + "{}", + pretty_print_error_in_src(&testcase.code, &res, 1120, "<embedded>") + ); + let (reason, _) = get_error_reason_loc(&res); + if !reason.contains(&part_reason) { + eprintln!( + "{}\nExecute error, expect \"{reason}\" in \"{res}\", but not found.", + testcase.code, + reason = style(reason).green(), + res = style(res).red() + ); + panic!() + } + } else { + eprintln!("{:#?}\nExpect Err(...), found Ok(...)", res); + panic!(); + } + } + } + println!(" ... {}", style("ok✅").green()); + } +} + +#[test] +#[allow(unused)] +fn test_type_anno() { + let python_source = r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu, mem: vector[f64])->(vector[f64|None], vector[f64], vector[_], vector[ _ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#; + let pyast = parser::parse(python_source, parser::Mode::Interactive).unwrap(); + let copr = parse_copr(python_source); + dbg!(copr); +} + +#[test] +#[allow(clippy::print_stdout, unused_must_use)] +// allow print in test function for debug purpose(like for quick testing a syntax&ideas) +fn test_calc_rvs() { + let python_source = r#" +@coprocessor(args=["open_time", "close"], returns=[ + "rv_7d", + "rv_15d", + "rv_30d", + "rv_60d", + "rv_90d", + "rv_180d" +]) +def calc_rvs(open_time, close): + from greptime import vector, log, prev, sqrt, datetime, pow, sum + def calc_rv(close, open_time, time, interval): + mask = (open_time < time) & (open_time > time - interval) + close = close[mask] + + avg_time_interval = (open_time[-1] - open_time[0])/(len(open_time)-1) + ref = log(close/prev(close)) + var = sum(pow(ref, 2)/(len(ref)-1)) + return sqrt(var/avg_time_interval) + + # how to get env var, + # maybe through accessing scope and serde then send to remote? 
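+    # The calls below evaluate calc_rv at the latest timestamp over a set of trailing
+    # windows; as written above, calc_rv estimates realized volatility as
+    #   sqrt( sum(log(close/prev(close))**2 / (n - 1)) / avg_time_interval )
+    # using the close prices whose open_time falls inside the window.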
+ timepoint = open_time[-1] + rv_7d = calc_rv(close, open_time, timepoint, datetime("7d")) + rv_15d = calc_rv(close, open_time, timepoint, datetime("15d")) + rv_30d = calc_rv(close, open_time, timepoint, datetime("30d")) + rv_60d = calc_rv(close, open_time, timepoint, datetime("60d")) + rv_90d = calc_rv(close, open_time, timepoint, datetime("90d")) + rv_180d = calc_rv(close, open_time, timepoint, datetime("180d")) + return rv_7d, rv_15d, rv_30d, rv_60d, rv_90d, rv_180d +"#; + let close_array = PrimitiveArray::from_slice([ + 10106.79f32, + 10106.09, + 10108.73, + 10106.38, + 10106.95, + 10107.55, + 10104.68, + 10108.8, + 10115.96, + 10117.08, + 10120.43, + ]); + let open_time_array = PrimitiveArray::from_slice([ + 1581231300i64, + 1581231360, + 1581231420, + 1581231480, + 1581231540, + 1581231600, + 1581231660, + 1581231720, + 1581231780, + 1581231840, + 1581231900, + ]); + let schema = Arc::new(Schema::from(vec![ + Field::new("close", DataType::Float32, false), + Field::new("open_time", DataType::Int64, false), + ])); + let rb = DfRecordBatch::try_new( + schema, + vec![Arc::new(close_array), Arc::new(open_time_array)], + ) + .unwrap(); + let ret = coprocessor::exec_coprocessor(python_source, &rb); + if let Err(Error::PyParse { + backtrace: _, + source, + }) = ret + { + let res = visualize_loc( + python_source, + &source.location, + "unknown tokens", + source.error.to_string().as_str(), + 0, + "copr.py", + ); + println!("{res}"); + } else if let Ok(res) = ret { + dbg!(&res); + } else { + dbg!(ret); + } +} + +#[test] +#[allow(clippy::print_stdout, unused_must_use)] +// allow print in test function for debug purpose(like for quick testing a syntax&ideas) +fn test_coprocessor() { + let python_source = r#" +@copr(args=["cpu", "mem"], returns=["ref"]) +def a(cpu, mem): + import greptime as gt + from greptime import vector, log2, prev, sum, pow, sqrt, datetime + abc = vector([v[0] > v[1] for v in zip(cpu, mem)]) + fed = cpu.filter(abc) + ref = log2(fed/prev(fed)) + return (0.5 < cpu) & ~( cpu >= 0.75) +"#; + let cpu_array = PrimitiveArray::from_slice([0.9f32, 0.8, 0.7, 0.3]); + let mem_array = PrimitiveArray::from_slice([0.1f64, 0.2, 0.3, 0.4]); + let schema = Arc::new(Schema::from(vec![ + Field::new("cpu", DataType::Float32, false), + Field::new("mem", DataType::Float64, false), + ])); + let rb = + DfRecordBatch::try_new(schema, vec![Arc::new(cpu_array), Arc::new(mem_array)]).unwrap(); + let ret = coprocessor::exec_coprocessor(python_source, &rb); + if let Err(Error::PyParse { + backtrace: _, + source, + }) = ret + { + let res = visualize_loc( + python_source, + &source.location, + "unknown tokens", + source.error.to_string().as_str(), + 0, + "copr.py", + ); + println!("{res}"); + } else if let Ok(res) = ret { + dbg!(&res); + } else { + dbg!(ret); + } +} diff --git a/src/script/src/python/testcases.ron b/src/script/src/python/testcases.ron new file mode 100644 index 000000000000..ad2191211336 --- /dev/null +++ b/src/script/src/python/testcases.ron @@ -0,0 +1,413 @@ +// This is the file for python coprocessor's testcases, +// including coprocessor parsing test and execute test +// check src/scalars/python/test.rs for more information +[ + ( + name: "correct_parse", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem + "#, + predicate: ParseIsOk( + result: ( + name: "a", + deco_args: ( + arg_names: ["cpu", "mem"], 
+ ret_names: ["perf", "what", "how", "why"], + ), + arg_types: [ + Some(( + datatype: Some(Float32), + is_nullable: false + )), + Some(( + datatype: Some(Float64), + is_nullable: false + )), + ], + return_types: [ + Some(( + datatype: Some(Float64), + is_nullable: false + )), + Some(( + datatype: Some(Float64), + is_nullable: true + )), + Some(( + datatype: None, + is_nullable: false + )), + Some(( + datatype: None, + is_nullable: true + )), + ] + ) + ) + ), + ( + name: "missing_decorator", + code: r#" +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect one decorator" + ) + ), + ( + name: "not_a_list_of_string", + code: r#" +@copr(args=["cpu", 3], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect a list of String, found" + ) + ), + ( + name: "not_even_a_list", + code: r#" +@copr(args=42, returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect a list, found" + ) + ), + ( + // unknown type names + name: "unknown_type_names", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[g32], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Unknown datatype:" + ) + ), + ( + // two type name + name: "two_type_names", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f32 | f64], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect one typenames and one `None`" + ) + ), + ( + name: "two_none", + // two `None` + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[None | None], mem: vector[f64])->(vector[f64], vector[None|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect one typenames and one `None`" + ) + ), + ( + // Expect a Types name + name: "unknown_type_names_in_ret", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f64|None], mem: vector[f64])->(vector[g64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Unknown datatype:" + ) + ), + ( + // no more `into` + name: "call_deprecated_for_cast_into", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[cast(f64)], mem: vector[f64])->(vector[f64], vector[f64|None], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect type in `vector[...]`, found " + ) + ), + ( + // Expect `vector` not `vec` + name: "vector_not_vec", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vec[f64], mem: vector[f64])->(vector[f64|None], vector[f64], vector[_], vector[_ | 
None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Wrong type annotation, expect `vector[...]`, found" + ) + ), + ( + // Expect `None` + name: "expect_none", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f64|1], mem: vector[f64])->(vector[f64|None], vector[f64], vector[_], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: "Expect a type name and a `None`, found left: " + ) + ), + ( + // more than one statement + name: "two_stmt", + code: r#" +print("hello world") +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f64], mem: vector[f64])->(vector[None|None], vector[into(f64)], vector[f64], vector[f64 | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: + "Expect one and only one python function with `@coprocessor` or `@cpor` decorator" + ) + ), + ( + // wrong decorator name + name: "typo_copr", + code: r#" +@corp(args=["cpu", "mem"], returns=["perf", "what", "how", "why"]) +def a(cpu: vector[f64], mem: vector[f64])->(vector[None|None], vector[into(f64)], vector[f64], vector[f64 | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: + "Expect decorator with name `copr` or `coprocessor`, found" + ) + ), + ( + name: "extra_keywords", + code: r#" +@copr(args=["cpu", "mem"], sql=3,psql = 4,rets=5) +def a(cpu: vector[f64], mem: vector[f64])->(vector[f64|None], vector[into(f64)], vector[f64], vector[f64 | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: + " keyword argument, found " + ) + ), + ( + name: "missing_keywords", + code: r#" +@copr(args=["cpu", "mem"]) +def a(cpu: vector[f64], mem: vector[f64])->(vector[f64|None], vector[into(f64)], vector[f64], vector[f64 | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem +"#, + predicate: ParseIsErr( + reason: + " keyword argument, found " + ) + ), + ( + // exec_coprocessor + name: "correct_exec", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], + vector[f32]): + return cpu + mem, cpu - mem +"#, + predicate: ExecIsOk( + fields: [ + ( + datatype: Some(Float64), + is_nullable: true + ), + ( + datatype: Some(Float32), + is_nullable: false + ), + ], + columns: [ + ( + ty: Float64, + len: 4 + ), + ( + ty: Float32, + len: 4 + ) + ] + ) + ), + ( + // constant column(float) + name: "constant_float_col", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], + vector[f32]): + return cpu + mem, 1.0 +"#, + predicate: ExecIsOk( + fields: [ + ( + datatype: Some(Float64), + is_nullable: true + ), + ( + datatype: Some(Float32), + is_nullable: false + ), + ], + columns: [ + ( + ty: Float64, + len: 4 + ), + ( + ty: Float32, + len: 4 + ) + ] + ) + ), + ( + // constant column(int) + name: "constant_int_col", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], + vector[f32]): + return cpu + mem, 1 +"#, + predicate: ExecIsOk( + fields: [ + ( + datatype: Some(Float64), + is_nullable: true + ), + ( + datatype: Some(Float32), + is_nullable: false + ), + ], + columns: [ + ( + ty: Float64, + len: 4 + ), + ( + ty: Float32, + len: 4 + ) + ] + ) + ), + ( + // constant column(bool) + name: 
"constant_bool_col", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], + vector[f32]): + return cpu + mem, True +"#, + predicate: ExecIsOk( + fields: [ + ( + datatype: Some(Float64), + is_nullable: true + ), + ( + datatype: Some(Float32), + is_nullable: false + ), + ], + columns: [ + ( + ty: Float64, + len: 4 + ), + ( + ty: Float32, + len: 4 + ) + ] + ) + ), + ( + // expect 4 vector ,found 5 + name: "ret_nums_wrong", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why", "whatever", "nihilism"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], vector[f64], vector[f64], vector[f64 | None], vector[bool], vector[_ | None]): + return cpu + mem, cpu - mem, cpu * mem, cpu / mem, cpu +"#, + predicate: ExecIsErr( + reason: "The number of return Vector is wrong, expect" + ) + ), + ( + name: "div_by_zero", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], + vector[f32]): + return cpu + mem, cpu - mem*(1/0) +"#, + predicate: ExecIsErr( + reason: "ZeroDivisionError: division by zero" + ) + ), + ( + name: "unexpected_token", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None], + vector[f32]): + return cpu + mem, cpu - mem*** +"#, + predicate: ParseIsErr( + reason: "invalid syntax. Got unexpected token " + ) + ), + ( + name: "wrong_return_anno", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->f32: + return cpu + mem, cpu - mem +"#, + predicate: ParseIsErr( + reason: "Expect `(vector[...], vector[...], ...)` or `vector[...]`, found " + ) + ), + ( + name: "break_outside_loop", + code: r#" +@copr(args=["cpu", "mem"], returns=["perf", "what"]) +def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64]): + break + return cpu + mem, cpu - mem +"#, + predicate: ExecIsErr( + reason: "'break' outside loop" + ) + ), + ( + name: "not_even_wrong", + code: r#" +42 +"#, + predicate: ParseIsErr( + reason: "Expect a function definition, found a" + ) + ) +] \ No newline at end of file diff --git a/src/script/src/python/utils.rs b/src/script/src/python/utils.rs new file mode 100644 index 000000000000..ba571e8bbeb8 --- /dev/null +++ b/src/script/src/python/utils.rs @@ -0,0 +1,27 @@ +use rustpython_vm::{builtins::PyBaseExceptionRef, PyObjectRef, PyPayload, PyRef, VirtualMachine}; +use snafu::{Backtrace, GenerateImplicitData}; + +use crate::python::error; +use crate::python::PyVector; +pub(crate) type PyVectorRef = PyRef<PyVector>; + +/// use `rustpython`'s `is_instance` method to check if a PyObject is a instance of class. 
+/// if `PyResult` is Err, then this function return `false` +pub fn is_instance<T: PyPayload>(obj: &PyObjectRef, vm: &VirtualMachine) -> bool { + obj.is_instance(T::class(vm).into(), vm).unwrap_or(false) +} + +pub fn format_py_error(excep: PyBaseExceptionRef, vm: &VirtualMachine) -> error::Error { + let mut msg = String::new(); + if let Err(e) = vm.write_exception(&mut msg, &excep) { + return error::Error::PyRuntime { + msg: format!("Failed to write exception msg, err: {}", e), + backtrace: Backtrace::generate(), + }; + } + + error::Error::PyRuntime { + msg, + backtrace: Backtrace::generate(), + } +} diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs new file mode 100644 index 000000000000..1bcd21e39244 --- /dev/null +++ b/src/script/src/python/vector.rs @@ -0,0 +1,1197 @@ +use std::ops::Deref; +use std::sync::Arc; + +use datatypes::arrow; +use datatypes::arrow::array::BooleanArray; +use datatypes::arrow::compute; +use datatypes::arrow::datatypes::DataType; +use datatypes::arrow::scalar::{PrimitiveScalar, Scalar}; +use datatypes::arrow::{ + array::{Array, ArrayRef, PrimitiveArray}, + compute::{ + arithmetics, + cast::{self, CastOptions}, + comparison, + }, +}; +use datatypes::data_type::ConcreteDataType; +use datatypes::value::OrderedFloat; +use datatypes::{ + value, + vectors::{Helper, NullVector, VectorBuilder, VectorRef}, +}; +use rustpython_vm::function::{Either, PyComparisonValue}; +use rustpython_vm::types::{Comparable, PyComparisonOp}; +use rustpython_vm::{ + builtins::{PyBaseExceptionRef, PyBool, PyBytes, PyFloat, PyInt, PyNone, PyStr}, + function::OptionalArg, + protocol::{PyMappingMethods, PySequenceMethods}, + pyclass, pyimpl, + sliceable::{SaturatedSlice, SequenceIndex, SequenceIndexOp}, + types::{AsMapping, AsSequence}, + AsObject, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine, +}; + +use crate::python::utils::is_instance; +use crate::python::utils::PyVectorRef; + +#[pyclass(module = false, name = "vector")] +#[derive(PyPayload, Debug)] +pub struct PyVector { + vector: VectorRef, +} + +impl From<VectorRef> for PyVector { + fn from(vector: VectorRef) -> Self { + Self { vector } + } +} + +fn emit_cast_error( + vm: &VirtualMachine, + src_ty: &DataType, + dst_ty: &DataType, +) -> PyBaseExceptionRef { + vm.new_type_error(format!( + "Can't cast source operand of type {:?} into target type of {:?}", + src_ty, dst_ty + )) +} +fn arrow2_rsub_scalar( + arr: &dyn Array, + val: &dyn Scalar, + _vm: &VirtualMachine, +) -> PyResult<Box<dyn Array>> { + // b - a => a * (-1) + b + let neg = arithmetics::mul_scalar(arr, &PrimitiveScalar::new(DataType::Int64, Some(-1i64))); + Ok(arithmetics::add_scalar(neg.as_ref(), val)) +} + +fn arrow2_rtruediv_scalar( + arr: &dyn Array, + val: &dyn Scalar, + vm: &VirtualMachine, +) -> PyResult<Box<dyn Array>> { + // val / arr => one_arr / arr * val (this is simpler to write) + let one_arr: Box<dyn Array> = if is_float(arr.data_type()) { + Box::new(PrimitiveArray::from_values(vec![1f64; arr.len()])) + } else if is_integer(arr.data_type()) { + Box::new(PrimitiveArray::from_values(vec![1i64; arr.len()])) + } else { + return Err(vm.new_not_implemented_error(format!( + "truediv of {:?} Scalar with {:?} Array is not supported", + val.data_type(), + arr.data_type() + ))); + }; + let tmp = arithmetics::mul_scalar(one_arr.as_ref(), val); + Ok(arithmetics::div(tmp.as_ref(), arr)) +} + +fn arrow2_rfloordiv_scalar( + arr: &dyn Array, + val: &dyn Scalar, + vm: &VirtualMachine, +) -> PyResult<Box<dyn Array>> { + // val // arr 
=> one_arr // arr * val (this is simpler to write) + let one_arr: Box<dyn Array> = if is_float(arr.data_type()) { + Box::new(PrimitiveArray::from_values(vec![1f64; arr.len()])) + } else if is_integer(arr.data_type()) { + Box::new(PrimitiveArray::from_values(vec![1i64; arr.len()])) + } else { + return Err(vm.new_not_implemented_error(format!( + "truediv of {:?} Scalar with {:?} Array is not supported", + val.data_type(), + arr.data_type() + ))); + }; + let tmp = arithmetics::mul_scalar(one_arr.as_ref(), val); + + Ok(arrow::compute::cast::cast( + arithmetics::div(tmp.as_ref(), arr).as_ref(), + &DataType::Int64, + cast::CastOptions { + wrapped: false, + partial: true, + }, + ) + .unwrap()) +} + +fn wrap_result<F>( + f: F, +) -> impl Fn(&dyn Array, &dyn Scalar, &VirtualMachine) -> PyResult<Box<dyn Array>> +where + F: Fn(&dyn Array, &dyn Scalar) -> Box<dyn Array>, +{ + move |left, right, _vm| Ok(f(left, right)) +} + +fn is_float(datatype: &DataType) -> bool { + matches!( + datatype, + DataType::Float16 | DataType::Float32 | DataType::Float64 + ) +} + +fn is_integer(datatype: &DataType) -> bool { + is_signed(datatype) || is_unsigned(datatype) +} + +fn is_signed(datatype: &DataType) -> bool { + matches!( + datatype, + DataType::Int8 | DataType::Int16 | DataType::Int32 | DataType::Int64 + ) +} + +fn is_unsigned(datatype: &DataType) -> bool { + matches!( + datatype, + DataType::UInt8 | DataType::UInt16 | DataType::UInt32 | DataType::UInt64 + ) +} + +fn cast(array: ArrayRef, target_type: &DataType, vm: &VirtualMachine) -> PyResult<Box<dyn Array>> { + cast::cast( + array.as_ref(), + target_type, + CastOptions { + wrapped: true, + partial: true, + }, + ) + .map_err(|e| vm.new_type_error(e.to_string())) +} +fn from_debug_error(err: impl std::fmt::Debug, vm: &VirtualMachine) -> PyBaseExceptionRef { + vm.new_runtime_error(format!("Runtime Error: {err:#?}")) +} + +impl AsRef<PyVector> for PyVector { + fn as_ref(&self) -> &PyVector { + self + } +} + +/// PyVector type wraps a greptime vector, impl multiply/div/add/sub opeerators etc. +#[pyimpl(with(AsMapping, AsSequence, Comparable))] +impl PyVector { + pub(crate) fn new( + iterable: OptionalArg<PyObjectRef>, + vm: &VirtualMachine, + ) -> PyResult<PyVector> { + if let OptionalArg::Present(iterable) = iterable { + let mut elements: Vec<PyObjectRef> = iterable.try_to_value(vm)?; + + if elements.is_empty() { + return Ok(PyVector::default()); + } + + let datatype = get_concrete_type(&elements[0], vm)?; + let mut buf = VectorBuilder::with_capacity(datatype.clone(), elements.len()); + + for obj in elements.drain(..) 
{ + let val = if let Some(v) = + pyobj_try_to_typed_val(obj.clone(), vm, Some(datatype.clone())) + { + v + } else { + return Err(vm.new_type_error(format!( + "Can't cast pyobject {:?} into concrete type {:?}", + obj, datatype + ))); + }; + buf.push(&val); + } + + Ok(PyVector { + vector: buf.finish(), + }) + } else { + Ok(PyVector::default()) + } + } + + /// create a ref to inner vector + #[inline] + pub fn as_vector_ref(&self) -> VectorRef { + self.vector.clone() + } + + #[inline] + pub fn to_arrow_array(&self) -> ArrayRef { + self.vector.to_arrow_array() + } + + fn scalar_arith_op<F>( + &self, + other: PyObjectRef, + target_type: Option<DataType>, + op: F, + vm: &VirtualMachine, + ) -> PyResult<PyVector> + where + F: Fn(&dyn Array, &dyn Scalar, &VirtualMachine) -> PyResult<Box<dyn Array>>, + { + // the right operand only support PyInt or PyFloat, + let (right, right_type) = { + if is_instance::<PyInt>(&other, vm) { + other + .try_into_value::<i64>(vm) + .map(|v| (value::Value::Int64(v), DataType::Int64))? + } else if is_instance::<PyFloat>(&other, vm) { + other + .try_into_value::<f64>(vm) + .map(|v| (value::Value::Float64(OrderedFloat(v)), DataType::Float64))? + } else { + return Err(vm.new_type_error(format!( + "Can't cast right operand into Scalar of Int or Float, actual: {}", + other.class().name() + ))); + } + }; + // assuming they are all 64 bit type if possible + let left = self.to_arrow_array(); + + let left_type = left.data_type(); + let right_type = &right_type; + // TODO(discord9): found better way to cast between signed and unsigned type + let target_type = target_type.unwrap_or_else(|| { + if is_signed(left_type) && is_signed(right_type) { + DataType::Int64 + } else if is_unsigned(left_type) && is_unsigned(right_type) { + DataType::UInt64 + } else { + DataType::Float64 + } + }); + let left = cast(left, &target_type, vm)?; + let right: Box<dyn Scalar> = if is_float(&target_type) { + match right { + value::Value::Int64(v) => { + Box::new(PrimitiveScalar::new(target_type, Some(v as f64))) + } + value::Value::UInt64(v) => { + Box::new(PrimitiveScalar::new(target_type, Some(v as f64))) + } + value::Value::Float64(v) => { + Box::new(PrimitiveScalar::new(target_type, Some(f64::from(v)))) + } + _ => unreachable!(), + } + } else if is_signed(&target_type) { + match right { + value::Value::Int64(v) => Box::new(PrimitiveScalar::new(target_type, Some(v))), + value::Value::UInt64(v) => { + Box::new(PrimitiveScalar::new(target_type, Some(v as i64))) + } + value::Value::Float64(v) => { + Box::new(PrimitiveScalar::new(DataType::Float64, Some(v.0 as i64))) + } + _ => unreachable!(), + } + } else if is_unsigned(&target_type) { + match right { + value::Value::Int64(v) => Box::new(PrimitiveScalar::new(target_type, Some(v))), + value::Value::UInt64(v) => Box::new(PrimitiveScalar::new(target_type, Some(v))), + value::Value::Float64(v) => { + Box::new(PrimitiveScalar::new(target_type, Some(f64::from(v)))) + } + _ => unreachable!(), + } + } else { + return Err(emit_cast_error(vm, right_type, &target_type)); + }; + + let result = op(left.as_ref(), right.as_ref(), vm)?; + + Ok(Helper::try_into_vector(&*result) + .map_err(|e| { + vm.new_type_error(format!( + "Can't cast result into vector, result: {:?}, err: {:?}", + result, e + )) + })? 
+ .into()) + } + + fn arith_op<F>( + &self, + other: PyObjectRef, + target_type: Option<DataType>, + op: F, + vm: &VirtualMachine, + ) -> PyResult<PyVector> + where + F: Fn(&dyn Array, &dyn Array) -> Box<dyn Array>, + { + let right = other.downcast_ref::<PyVector>().ok_or_else(|| { + vm.new_type_error(format!( + "Can't cast right operand into PyVector, actual: {}", + other.class().name() + )) + })?; + let left = self.to_arrow_array(); + let right = right.to_arrow_array(); + + let left_type = &left.data_type(); + let right_type = &right.data_type(); + + let target_type = target_type.unwrap_or_else(|| { + if is_signed(left_type) && is_signed(right_type) { + DataType::Int64 + } else if is_unsigned(left_type) && is_unsigned(right_type) { + DataType::UInt64 + } else { + DataType::Float64 + } + }); + + let left = cast(left, &target_type, vm)?; + let right = cast(right, &target_type, vm)?; + + let result = op(left.as_ref(), right.as_ref()); + + Ok(Helper::try_into_vector(&*result) + .map_err(|e| { + vm.new_type_error(format!( + "Can't cast result into vector, result: {:?}, err: {:?}", + result, e + )) + })? + .into()) + } + + #[pymethod(name = "__radd__")] + #[pymethod(magic)] + fn add(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op(other, None, wrap_result(arithmetics::add_scalar), vm) + } else { + self.arith_op(other, None, arithmetics::add, vm) + } + } + + #[pymethod(magic)] + fn sub(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op(other, None, wrap_result(arithmetics::sub_scalar), vm) + } else { + self.arith_op(other, None, arithmetics::sub, vm) + } + } + + #[pymethod(magic)] + fn rsub(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op(other, None, arrow2_rsub_scalar, vm) + } else { + self.arith_op(other, None, |a, b| arithmetics::sub(b, a), vm) + } + } + + #[pymethod(name = "__rmul__")] + #[pymethod(magic)] + fn mul(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op(other, None, wrap_result(arithmetics::mul_scalar), vm) + } else { + self.arith_op(other, None, arithmetics::mul, vm) + } + } + + #[pymethod(magic)] + fn truediv(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op( + other, + Some(DataType::Float64), + wrap_result(arithmetics::div_scalar), + vm, + ) + } else { + self.arith_op(other, Some(DataType::Float64), arithmetics::div, vm) + } + } + + #[pymethod(magic)] + fn rtruediv(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op(other, Some(DataType::Float64), arrow2_rtruediv_scalar, vm) + } else { + self.arith_op( + other, + Some(DataType::Float64), + |a, b| arithmetics::div(b, a), + vm, + ) + } + } + + #[pymethod(magic)] + fn floordiv(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + self.scalar_arith_op( + other, + Some(DataType::Int64), + wrap_result(arithmetics::div_scalar), + vm, + ) + } else { + self.arith_op(other, Some(DataType::Int64), arithmetics::div, vm) + } + } + + #[pymethod(magic)] + fn rfloordiv(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + // FIXME: DataType convert problem, 
target_type should be infered? + self.scalar_arith_op(other, Some(DataType::Int64), arrow2_rfloordiv_scalar, vm) + } else { + self.arith_op( + other, + Some(DataType::Int64), + |a, b| arithmetics::div(b, a), + vm, + ) + } + } + + /// rich compare, return a boolean array, accept type are vec and vec and vec and number + fn richcompare( + &self, + other: PyObjectRef, + op: PyComparisonOp, + vm: &VirtualMachine, + ) -> PyResult<PyVector> { + if is_pyobj_scalar(&other, vm) { + let scalar_op = get_arrow_scalar_op(op); + self.scalar_arith_op(other, None, scalar_op, vm) + } else { + let arr_op = get_arrow_op(op); + self.arith_op(other, None, arr_op, vm) + } + } + + // it seems rustpython's richcompare support is not good + // The Comparable Trait only support normal cmp + // (yes there is a slot_richcompare function, but it is not used in anywhere) + // so use our own function + // TODO(discord9): test those funciton + + #[pymethod(name = "eq")] + #[pymethod(magic)] + fn eq(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + self.richcompare(other, PyComparisonOp::Eq, vm) + } + + #[pymethod(name = "ne")] + #[pymethod(magic)] + fn ne(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + self.richcompare(other, PyComparisonOp::Ne, vm) + } + + #[pymethod(name = "gt")] + #[pymethod(magic)] + fn gt(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + self.richcompare(other, PyComparisonOp::Gt, vm) + } + + #[pymethod(name = "lt")] + #[pymethod(magic)] + fn lt(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + self.richcompare(other, PyComparisonOp::Lt, vm) + } + + #[pymethod(name = "ge")] + #[pymethod(magic)] + fn ge(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + self.richcompare(other, PyComparisonOp::Ge, vm) + } + + #[pymethod(name = "le")] + #[pymethod(magic)] + fn le(&self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyVector> { + self.richcompare(other, PyComparisonOp::Le, vm) + } + + #[pymethod(magic)] + fn and(&self, other: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyVector> { + let left = self.to_arrow_array(); + let left = left + .as_any() + .downcast_ref::<BooleanArray>() + .ok_or_else(|| vm.new_type_error(format!("Can't cast {left:#?} as a Boolean Array")))?; + let right = other.to_arrow_array(); + let right = right + .as_any() + .downcast_ref::<BooleanArray>() + .ok_or_else(|| vm.new_type_error(format!("Can't cast {left:#?} as a Boolean Array")))?; + let res = compute::boolean::and(left, right).map_err(|err| from_debug_error(err, vm))?; + let res = Arc::new(res) as ArrayRef; + let ret = Helper::try_into_vector(&*res).map_err(|err| from_debug_error(err, vm))?; + Ok(ret.into()) + } + + #[pymethod(magic)] + fn or(&self, other: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyVector> { + let left = self.to_arrow_array(); + let left = left + .as_any() + .downcast_ref::<BooleanArray>() + .ok_or_else(|| vm.new_type_error(format!("Can't cast {left:#?} as a Boolean Array")))?; + let right = other.to_arrow_array(); + let right = right + .as_any() + .downcast_ref::<BooleanArray>() + .ok_or_else(|| vm.new_type_error(format!("Can't cast {left:#?} as a Boolean Array")))?; + let res = compute::boolean::or(left, right).map_err(|err| from_debug_error(err, vm))?; + let res = Arc::new(res) as ArrayRef; + let ret = Helper::try_into_vector(&*res).map_err(|err| from_debug_error(err, vm))?; + Ok(ret.into()) + } + + #[pymethod(magic)] + fn invert(&self, vm: &VirtualMachine) -> 
PyResult<PyVector> { + dbg!(); + let left = self.to_arrow_array(); + let left = left + .as_any() + .downcast_ref::<BooleanArray>() + .ok_or_else(|| vm.new_type_error(format!("Can't cast {left:#?} as a Boolean Array")))?; + let res = compute::boolean::not(left); + let res = Arc::new(res) as ArrayRef; + let ret = Helper::try_into_vector(&*res).map_err(|err| from_debug_error(err, vm))?; + Ok(ret.into()) + } + + #[pymethod(magic)] + fn len(&self) -> usize { + self.as_vector_ref().len() + } + + /// take a boolean array and filters the Array, returning elements matching the filter (i.e. where the values are true). + #[pymethod(name = "filter")] + fn filter(&self, other: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyVector> { + let left = self.to_arrow_array(); + let right: ArrayRef = other.to_arrow_array(); + let filter = right.as_any().downcast_ref::<BooleanArray>(); + match filter { + Some(filter) => { + let res = compute::filter::filter(left.as_ref(), filter); + + let res = + res.map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?; + let ret = Helper::try_into_vector(&*res).map_err(|e| { + vm.new_type_error(format!( + "Can't cast result into vector, result: {:?}, err: {:?}", + res, e + )) + })?; + Ok(ret.into()) + } + None => Err(vm.new_runtime_error(format!( + "Can't cast operand into a Boolean Array, which is {right:#?}" + ))), + } + } + + #[pymethod(magic)] + fn doc(&self) -> PyResult<PyStr> { + Ok(PyStr::from( + "PyVector is like a Python array, a compact array of elem of same datatype, but Readonly for now", + )) + } + + fn _getitem(&self, needle: &PyObject, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + if let Some(seq) = needle.payload::<PyVector>() { + let mask = seq.to_arrow_array(); + let mask = mask + .as_any() + .downcast_ref::<BooleanArray>() + .ok_or_else(|| { + vm.new_type_error(format!("Can't cast {seq:#?} as a Boolean Array")) + })?; + // let left = self.to_arrow_array(); + let res = compute::filter::filter(self.to_arrow_array().as_ref(), mask) + .map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?; + let ret = Helper::try_into_vector(&*res).map_err(|e| { + vm.new_type_error(format!( + "Can't cast result into vector, result: {:?}, err: {:?}", + res, e + )) + })?; + Ok(Self::from(ret).into_pyobject(vm)) + } else { + match SequenceIndex::try_from_borrowed_object(vm, needle, "vector")? 
{ + SequenceIndex::Int(i) => self.getitem_by_index(i, vm), + SequenceIndex::Slice(slice) => self.getitem_by_slice(&slice, vm), + } + } + } + + fn getitem_by_index(&self, i: isize, vm: &VirtualMachine) -> PyResult<PyObjectRef> { + // in the newest version of rustpython_vm, wrapped_at for isize is replace by wrap_index(i, len) + let i = i + .wrapped_at(self.len()) + .ok_or_else(|| vm.new_index_error("PyVector index out of range".to_owned()))?; + Ok(val_to_pyobj(self.as_vector_ref().get(i), vm)) + } + + /// Return a `PyVector` in `PyObjectRef` + fn getitem_by_slice( + &self, + slice: &SaturatedSlice, + vm: &VirtualMachine, + ) -> PyResult<PyObjectRef> { + // adjust_indices so negative number is transform to usize + let (mut range, step, slice_len) = slice.adjust_indices(self.len()); + let vector = self.as_vector_ref(); + + let mut buf = VectorBuilder::with_capacity(vector.data_type(), slice_len); + if slice_len == 0 { + let v: PyVector = buf.finish().into(); + Ok(v.into_pyobject(vm)) + } else if step == 1 { + let v: PyVector = vector.slice(range.next().unwrap_or(0), slice_len).into(); + Ok(v.into_pyobject(vm)) + } else if step.is_negative() { + // Negative step require special treatment + for i in range.rev().step_by(step.unsigned_abs()) { + buf.push(&vector.get(i)) + } + let v: PyVector = buf.finish().into(); + Ok(v.into_pyobject(vm)) + } else { + for i in range.step_by(step.unsigned_abs()) { + buf.push(&vector.get(i)) + } + let v: PyVector = buf.finish().into(); + Ok(v.into_pyobject(vm)) + } + } + + /// Unsupport + /// TODO(discord9): make it work + #[allow(unused)] + fn setitem_by_index( + zelf: PyRef<Self>, + i: isize, + value: PyObjectRef, + vm: &VirtualMachine, + ) -> PyResult<()> { + Err(vm.new_not_implemented_error("setitem_by_index unimplemented".to_string())) + } +} + +/// get corrsponding arrow op function according to given PyComaprsionOp +/// +/// TODO(discord9): impl scalar version function +fn get_arrow_op(op: PyComparisonOp) -> impl Fn(&dyn Array, &dyn Array) -> Box<dyn Array> { + let op_bool_arr = match op { + PyComparisonOp::Eq => comparison::eq, + PyComparisonOp::Ne => comparison::neq, + PyComparisonOp::Gt => comparison::gt, + PyComparisonOp::Lt => comparison::lt, + PyComparisonOp::Ge => comparison::gt_eq, + PyComparisonOp::Le => comparison::lt_eq, + }; + + move |a: &dyn Array, b: &dyn Array| -> Box<dyn Array> { + let ret = op_bool_arr(a, b); + Box::new(ret) as _ + } +} + +/// get corrsponding arrow scalar op function according to given PyComaprsionOp +/// +/// TODO(discord9): impl scalar version function +fn get_arrow_scalar_op( + op: PyComparisonOp, +) -> impl Fn(&dyn Array, &dyn Scalar, &VirtualMachine) -> PyResult<Box<dyn Array>> { + let op_bool_arr = match op { + PyComparisonOp::Eq => comparison::eq_scalar, + PyComparisonOp::Ne => comparison::neq_scalar, + PyComparisonOp::Gt => comparison::gt_scalar, + PyComparisonOp::Lt => comparison::lt_scalar, + PyComparisonOp::Ge => comparison::gt_eq_scalar, + PyComparisonOp::Le => comparison::lt_eq_scalar, + }; + + move |a: &dyn Array, b: &dyn Scalar, _vm| -> PyResult<Box<dyn Array>> { + let ret = op_bool_arr(a, b); + Ok(Box::new(ret) as _) + } +} + +/// if this pyobj can be cast to a scalar value(i.e Null/Int/Float/Bool) +#[inline] +fn is_pyobj_scalar(obj: &PyObjectRef, vm: &VirtualMachine) -> bool { + //let is_instance = |ty: &PyObject| obj.is_instance(ty, vm).unwrap_or(false); + is_instance::<PyNone>(obj, vm) + || is_instance::<PyInt>(obj, vm) + || is_instance::<PyFloat>(obj, vm) + || is_instance::<PyBool>(obj, vm) +} + +/// 
convert a `PyObjectRef` into a `datatypes::Value`(is that ok?) +/// if `obj` can be convert to given ConcreteDataType then return inner `Value` else return None +/// if dtype is None, return types with highest precision +/// Not used for now but may be use in future +pub fn pyobj_try_to_typed_val( + obj: PyObjectRef, + vm: &VirtualMachine, + dtype: Option<ConcreteDataType>, +) -> Option<value::Value> { + if let Some(dtype) = dtype { + match dtype { + ConcreteDataType::Null(_) => { + if is_instance::<PyNone>(&obj, vm) { + Some(value::Value::Null) + } else { + None + } + } + ConcreteDataType::Boolean(_) => { + if is_instance::<PyBool>(&obj, vm) || is_instance::<PyInt>(&obj, vm) { + Some(value::Value::Boolean( + obj.try_into_value::<bool>(vm).unwrap_or(false), + )) + } else { + None + } + } + ConcreteDataType::Int8(_) + | ConcreteDataType::Int16(_) + | ConcreteDataType::Int32(_) + | ConcreteDataType::Int64(_) => { + if is_instance::<PyInt>(&obj, vm) { + match dtype { + ConcreteDataType::Int8(_) => { + obj.try_into_value::<i8>(vm).ok().map(value::Value::Int8) + } + ConcreteDataType::Int16(_) => { + obj.try_into_value::<i16>(vm).ok().map(value::Value::Int16) + } + ConcreteDataType::Int32(_) => { + obj.try_into_value::<i32>(vm).ok().map(value::Value::Int32) + } + ConcreteDataType::Int64(_) => { + obj.try_into_value::<i64>(vm).ok().map(value::Value::Int64) + } + _ => unreachable!(), + } + } else { + None + } + } + ConcreteDataType::UInt8(_) + | ConcreteDataType::UInt16(_) + | ConcreteDataType::UInt32(_) + | ConcreteDataType::UInt64(_) => { + if is_instance::<PyInt>(&obj, vm) + && obj.clone().try_into_value::<i64>(vm).unwrap_or(-1) >= 0 + { + match dtype { + ConcreteDataType::UInt8(_) => { + obj.try_into_value::<u8>(vm).ok().map(value::Value::UInt8) + } + ConcreteDataType::UInt16(_) => { + obj.try_into_value::<u16>(vm).ok().map(value::Value::UInt16) + } + ConcreteDataType::UInt32(_) => { + obj.try_into_value::<u32>(vm).ok().map(value::Value::UInt32) + } + ConcreteDataType::UInt64(_) => { + obj.try_into_value::<u64>(vm).ok().map(value::Value::UInt64) + } + _ => unreachable!(), + } + } else { + None + } + } + ConcreteDataType::Float32(_) | ConcreteDataType::Float64(_) => { + if is_instance::<PyFloat>(&obj, vm) { + match dtype { + ConcreteDataType::Float32(_) => obj + .try_into_value::<f32>(vm) + .ok() + .map(|v| value::Value::Float32(OrderedFloat(v))), + ConcreteDataType::Float64(_) => obj + .try_into_value::<f64>(vm) + .ok() + .map(|v| value::Value::Float64(OrderedFloat(v))), + _ => unreachable!(), + } + } else { + None + } + } + + ConcreteDataType::String(_) => { + if is_instance::<PyStr>(&obj, vm) { + obj.try_into_value::<String>(vm) + .ok() + .map(|v| value::Value::String(v.into())) + } else { + None + } + } + ConcreteDataType::Binary(_) => { + if is_instance::<PyBytes>(&obj, vm) { + obj.try_into_value::<Vec<u8>>(vm).ok().and_then(|v| { + String::from_utf8(v) + .ok() + .map(|v| value::Value::String(v.into())) + }) + } else { + None + } + } + ConcreteDataType::List(_) => unreachable!(), + ConcreteDataType::Date(_) | ConcreteDataType::DateTime(_) => todo!(), + } + } else if is_instance::<PyNone>(&obj, vm) { + // if Untyped then by default return types with highest precision + Some(value::Value::Null) + } else if is_instance::<PyBool>(&obj, vm) { + Some(value::Value::Boolean( + obj.try_into_value::<bool>(vm).unwrap_or(false), + )) + } else if is_instance::<PyInt>(&obj, vm) { + obj.try_into_value::<i64>(vm).ok().map(value::Value::Int64) + } else if is_instance::<PyFloat>(&obj, vm) { + 
obj.try_into_value::<f64>(vm) + .ok() + .map(|v| value::Value::Float64(OrderedFloat(v))) + } else if is_instance::<PyStr>(&obj, vm) { + obj.try_into_value::<Vec<u8>>(vm).ok().and_then(|v| { + String::from_utf8(v) + .ok() + .map(|v| value::Value::String(v.into())) + }) + } else if is_instance::<PyBytes>(&obj, vm) { + obj.try_into_value::<Vec<u8>>(vm).ok().and_then(|v| { + String::from_utf8(v) + .ok() + .map(|v| value::Value::String(v.into())) + }) + } else { + None + } +} + +/// convert a DataType `Value` into a `PyObjectRef` +pub fn val_to_pyobj(val: value::Value, vm: &VirtualMachine) -> PyObjectRef { + match val { + // This comes from:https://github.com/RustPython/RustPython/blob/8ab4e770351d451cfdff5dc2bf8cce8df76a60ab/vm/src/builtins/singletons.rs#L37 + // None in Python is universally singleton so + // use `vm.ctx.new_int` and `new_***` is more idomtic for there are cerntain optimize can be use in this way(small int pool etc.) + value::Value::Null => vm.ctx.none(), + value::Value::Boolean(v) => vm.ctx.new_bool(v).into(), + value::Value::UInt8(v) => vm.ctx.new_int(v).into(), + value::Value::UInt16(v) => vm.ctx.new_int(v).into(), + value::Value::UInt32(v) => vm.ctx.new_int(v).into(), + value::Value::UInt64(v) => vm.ctx.new_int(v).into(), + value::Value::Int8(v) => vm.ctx.new_int(v).into(), + value::Value::Int16(v) => vm.ctx.new_int(v).into(), + value::Value::Int32(v) => vm.ctx.new_int(v).into(), + value::Value::Int64(v) => vm.ctx.new_int(v).into(), + value::Value::Float32(v) => vm.ctx.new_float(v.0 as f64).into(), + value::Value::Float64(v) => vm.ctx.new_float(v.0).into(), + value::Value::String(s) => vm.ctx.new_str(s.as_utf8()).into(), + // is this copy necessary? + value::Value::Binary(b) => vm.ctx.new_bytes(b.deref().to_vec()).into(), + // is `Date` and `DateTime` supported yet? 
For now just ad hoc into PyInt + value::Value::Date(v) => vm.ctx.new_int(v.val()).into(), + value::Value::DateTime(v) => vm.ctx.new_int(v.val()).into(), + value::Value::List(_) => unreachable!(), + } +} + +impl Default for PyVector { + fn default() -> PyVector { + PyVector { + vector: Arc::new(NullVector::new(0)), + } + } +} + +fn get_concrete_type(obj: &PyObjectRef, vm: &VirtualMachine) -> PyResult<ConcreteDataType> { + if is_instance::<PyNone>(obj, vm) { + Ok(ConcreteDataType::null_datatype()) + } else if is_instance::<PyBool>(obj, vm) { + Ok(ConcreteDataType::boolean_datatype()) + } else if is_instance::<PyInt>(obj, vm) { + Ok(ConcreteDataType::int64_datatype()) + } else if is_instance::<PyFloat>(obj, vm) { + Ok(ConcreteDataType::float64_datatype()) + } else if is_instance::<PyStr>(obj, vm) { + Ok(ConcreteDataType::string_datatype()) + } else { + Err(vm.new_type_error(format!("Unsupported pyobject type: {:?}", obj))) + } +} + +impl AsMapping for PyVector { + const AS_MAPPING: PyMappingMethods = PyMappingMethods { + length: Some(|mapping, _vm| Ok(Self::mapping_downcast(mapping).len())), + subscript: Some(|mapping, needle, vm| Self::mapping_downcast(mapping)._getitem(needle, vm)), + ass_subscript: None, + }; +} + +impl AsSequence for PyVector { + const AS_SEQUENCE: PySequenceMethods = PySequenceMethods { + length: Some(|seq, _vm| Ok(Self::sequence_downcast(seq).len())), + item: Some(|seq, i, vm| { + let zelf = Self::sequence_downcast(seq); + zelf.getitem_by_index(i, vm) + }), + ass_item: Some(|_seq, _i, _value, vm| { + Err(vm.new_type_error("PyVector object doesn't support item assigns".to_owned())) + }), + ..PySequenceMethods::NOT_IMPLEMENTED + }; +} + +impl Comparable for PyVector { + fn slot_richcompare( + zelf: &PyObject, + other: &PyObject, + op: PyComparisonOp, + vm: &VirtualMachine, + ) -> PyResult<Either<PyObjectRef, PyComparisonValue>> { + // TODO(discord9): return a boolean array of compare result + if let Some(zelf) = zelf.downcast_ref::<Self>() { + let ret: PyVector = zelf.richcompare(other.to_owned(), op, vm)?; + let ret = ret.into_pyobject(vm); + Ok(Either::A(ret)) + } else { + Err(vm.new_type_error(format!( + "unexpected payload {} for {}", + zelf, + op.method_name(&vm.ctx).as_str() + ))) + } + } + fn cmp( + _zelf: &rustpython_vm::Py<Self>, + _other: &PyObject, + _op: PyComparisonOp, + _vm: &VirtualMachine, + ) -> PyResult<PyComparisonValue> { + Ok(PyComparisonValue::NotImplemented) + } +} +#[cfg(test)] +pub mod tests { + + use std::sync::Arc; + + use datatypes::vectors::{Float32Vector, Int32Vector, NullVector}; + use rustpython_vm::{builtins::PyList, class::PyClassImpl, protocol::PySequence}; + use value::Value; + + use super::*; + + type PredicateFn = Option<fn(PyResult<PyObjectRef>, &VirtualMachine) -> bool>; + /// test the paired `val_to_obj` and `pyobj_to_val` func + #[test] + fn test_val2pyobj2val() { + rustpython_vm::Interpreter::without_stdlib(Default::default()).enter(|vm| { + let i = value::Value::Float32(OrderedFloat(2.0)); + let j = value::Value::Int32(1); + let dtype = i.data_type(); + let obj = val_to_pyobj(i, vm); + assert!(is_pyobj_scalar(&obj, vm)); + let obj_1 = obj.clone(); + let obj_2 = obj.clone(); + let ri = pyobj_try_to_typed_val(obj, vm, Some(dtype)); + let rj = pyobj_try_to_typed_val(obj_1, vm, Some(j.data_type())); + let rn = pyobj_try_to_typed_val(obj_2, vm, None); + assert_eq!(rj, None); + assert_eq!(rn, Some(value::Value::Float64(OrderedFloat(2.0)))); + assert_eq!(ri, Some(value::Value::Float32(OrderedFloat(2.0)))); + let typed_lst = { + [ + 
Value::Null, + Value::Boolean(true), + Value::Boolean(false), + // PyInt is Big Int + Value::Int16(2), + Value::Int32(2), + Value::Int64(2), + Value::UInt16(2), + Value::UInt32(2), + Value::UInt64(2), + Value::Float32(OrderedFloat(2.0)), + Value::Float64(OrderedFloat(2.0)), + Value::String("123".into()), + // TODO(discord9): test Bytes and Date/DateTime + ] + }; + for val in typed_lst { + let obj = val_to_pyobj(val.clone(), vm); + let ret = pyobj_try_to_typed_val(obj, vm, Some(val.data_type())); + assert_eq!(ret, Some(val)); + } + }) + } + + #[test] + fn test_getitem_by_index_in_vm() { + rustpython_vm::Interpreter::without_stdlib(Default::default()).enter(|vm| { + PyVector::make_class(&vm.ctx); + let a: VectorRef = Arc::new(Int32Vector::from_vec(vec![1, 2, 3, 4])); + let a = PyVector::from(a); + assert_eq!( + 1, + a.getitem_by_index(0, vm) + .map(|v| v.try_into_value::<i32>(vm).unwrap_or(0)) + .unwrap_or(0) + ); + assert!(a.getitem_by_index(4, vm).ok().is_none()); + assert_eq!( + 4, + a.getitem_by_index(-1, vm) + .map(|v| v.try_into_value::<i32>(vm).unwrap_or(0)) + .unwrap_or(0) + ); + assert!(a.getitem_by_index(-5, vm).ok().is_none()); + + let a: VectorRef = Arc::new(NullVector::new(42)); + let a = PyVector::from(a); + let a = a.into_pyobject(vm); + assert!(PySequence::find_methods(&a, vm).is_some()); + assert!(PySequence::new(&a, vm).is_some()); + }) + } + + pub fn execute_script( + script: &str, + test_vec: Option<PyVector>, + predicate: PredicateFn, + ) -> Result<(PyObjectRef, Option<bool>), PyRef<rustpython_vm::builtins::PyBaseException>> { + let mut pred_res = None; + rustpython_vm::Interpreter::without_stdlib(Default::default()) + .enter(|vm| { + PyVector::make_class(&vm.ctx); + let scope = vm.new_scope_with_builtins(); + let a: VectorRef = Arc::new(Int32Vector::from_vec(vec![1, 2, 3, 4])); + let a = PyVector::from(a); + let b: VectorRef = Arc::new(Float32Vector::from_vec(vec![1.2, 2.0, 3.4, 4.5])); + let b = PyVector::from(b); + scope + .locals + .as_object() + .set_item("a", vm.new_pyobj(a), vm) + .expect("failed"); + scope + .locals + .as_object() + .set_item("b", vm.new_pyobj(b), vm) + .expect("failed"); + + if let Some(v) = test_vec { + scope + .locals + .as_object() + .set_item("test_vec", vm.new_pyobj(v), vm) + .expect("failed"); + } + + let code_obj = vm + .compile( + script, + rustpython_vm::compile::Mode::BlockExpr, + "<embedded>".to_owned(), + ) + .map_err(|err| vm.new_syntax_error(&err))?; + let ret = vm.run_code_obj(code_obj, scope); + pred_res = predicate.map(|f| f(ret.clone(), vm)); + ret + }) + .map(|r| (r, pred_res)) + } + + #[test] + #[allow(clippy::print_stdout)] + // for debug purpose, also this is already a test function so allow print_stdout shouldn't be a problem? 
+ fn test_execute_script() { + fn is_eq<T: std::cmp::PartialEq + rustpython_vm::TryFromObject>( + v: PyResult, + i: T, + vm: &VirtualMachine, + ) -> bool { + v.and_then(|v| v.try_into_value::<T>(vm)) + .map(|v| v == i) + .unwrap_or(false) + } + + let snippet: Vec<(&str, PredicateFn)> = vec![ + ("1", Some(|v, vm| is_eq(v, 1i32, vm))), + ("len(a)", Some(|v, vm| is_eq(v, 4i32, vm))), + ("a[-1]", Some(|v, vm| is_eq(v, 4i32, vm))), + ("a[0]*5", Some(|v, vm| is_eq(v, 5i32, vm))), + ( + "list(a)", + Some(|v, vm| { + v.map_or(false, |obj| { + obj.is_instance(PyList::class(vm).into(), vm) + .unwrap_or(false) + }) + }), + ), + ( + "len(a[1:-1])#elem in [1,3)", + Some(|v, vm| is_eq(v, 2i64, vm)), + ), + ("(a+1)[0]", Some(|v, vm| is_eq(v, 2i32, vm))), + ("(a-1)[0]", Some(|v, vm| is_eq(v, 0i32, vm))), + ("(a*2)[0]", Some(|v, vm| is_eq(v, 2i64, vm))), + ("(a/2.0)[2]", Some(|v, vm| is_eq(v, 1.5f64, vm))), + ("(a/2)[2]", Some(|v, vm| is_eq(v, 1.5f64, vm))), + ("(a//2)[2]", Some(|v, vm| is_eq(v, 1i32, vm))), + ("(2-a)[0]", Some(|v, vm| is_eq(v, 1i32, vm))), + ("(3/a)[1]", Some(|v, vm| is_eq(v, 1.5, vm))), + ("(3//a)[1]", Some(|v, vm| is_eq(v, 1, vm))), + ("(3/a)[2]", Some(|v, vm| is_eq(v, 1.0, vm))), + ( + "(a+1)[0] + (a-1)[0] * (a/2.0)[2]", + Some(|v, vm| is_eq(v, 2.0, vm)), + ), + ]; + for (code, pred) in snippet { + let result = execute_script(code, None, pred); + + println!( + "\u{001B}[35m{code}\u{001B}[0m: {:?}{}", + result.clone().map(|v| v.0), + result + .clone() + .map(|v| if let Some(v) = v.1 { + if v { + "\u{001B}[32m...[ok]\u{001B}[0m".to_string() + } else { + "\u{001B}[31m...[failed]\u{001B}[0m".to_string() + } + } else { + "\u{001B}[36m...[unapplicable]\u{001B}[0m".to_string() + }) + .unwrap() + ); + + if let Ok(p) = result { + if let Some(v) = p.1 { + if !v { + panic!("{code}: {:?}\u{001B}[12m...[failed]\u{001B}[0m", p.0) + } + } + } else { + panic!("{code}: {:?}", result) + } + } + } +} diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml index cfd5904e5173..ffd72b51de18 100644 --- a/src/servers/Cargo.toml +++ b/src/servers/Cargo.toml @@ -34,3 +34,4 @@ catalog = { path = "../catalog" } mysql_async = "0.30" rand = "0.8" test-util = { path = "../../test-util" } +script = { path = "../script", features = ["python"] } diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs index 22df75682c44..97d31ce035a4 100644 --- a/src/servers/src/error.rs +++ b/src/servers/src/error.rs @@ -49,6 +49,13 @@ pub enum Error { source: BoxedError, }, + #[snafu(display("Failed to execute script: {}, source: {}", script, source))] + ExecuteScript { + script: String, + #[snafu(backtrace)] + source: BoxedError, + }, + #[snafu(display("Not supported: {}", feat))] NotSupported { feat: String }, } @@ -66,7 +73,11 @@ impl ErrorExt for Error { | Error::StartHttp { .. } | Error::StartGrpc { .. } | Error::TcpBind { .. } => StatusCode::Internal, - Error::ExecuteQuery { source, .. } => source.status_code(), + + Error::ExecuteScript { source, .. } | Error::ExecuteQuery { source, .. } => { + source.status_code() + } + Error::NotSupported { .. 
} => StatusCode::InvalidArguments, } } diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs index d9f5101cc692..81e5483657ba 100644 --- a/src/servers/src/http.rs +++ b/src/servers/src/http.rs @@ -8,8 +8,7 @@ use axum::{ error_handling::HandleErrorLayer, response::IntoResponse, response::{Json, Response}, - routing::get, - BoxError, Extension, Router, + routing, BoxError, Extension, Router, }; use common_recordbatch::{util, RecordBatch}; use common_telemetry::logging::info; @@ -23,6 +22,8 @@ use crate::error::{Result, StartHttpSnafu}; use crate::query_handler::SqlQueryHandlerRef; use crate::server::Server; +const HTTP_API_VERSION: &str = "v1"; + pub struct HttpServer { query_handler: SqlQueryHandlerRef, } @@ -116,9 +117,14 @@ impl HttpServer { pub fn make_app(&self) -> Router { Router::new() - // handlers - .route("/sql", get(handler::sql)) - .route("/metrics", get(handler::metrics)) + .nest( + &format!("/{}", HTTP_API_VERSION), + Router::new() + // handlers + .route("/sql", routing::get(handler::sql)) + .route("/scripts", routing::post(handler::scripts)), + ) + .route("/metrics", routing::get(handler::metrics)) // middlewares .layer( ServiceBuilder::new() diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs index 1ecbaeb51176..15134690c947 100644 --- a/src/servers/src/http/handler.rs +++ b/src/servers/src/http/handler.rs @@ -1,7 +1,8 @@ use std::collections::HashMap; -use axum::extract::{Extension, Query}; +use axum::extract::{Extension, Json, Query}; use common_telemetry::metric; +use serde::{Deserialize, Serialize}; use crate::http::{HttpResponse, JsonResponse}; use crate::query_handler::SqlQueryHandlerRef; @@ -33,3 +34,23 @@ pub async fn metrics( HttpResponse::Text("Prometheus handle not initialized.".to_string()) } } + +#[derive(Deserialize, Serialize)] +pub struct ScriptExecution { + pub script: String, +} + +/// Handler to execute scripts +#[axum_macros::debug_handler] +pub async fn scripts( + Extension(query_handler): Extension<SqlQueryHandlerRef>, + Json(payload): Json<ScriptExecution>, +) -> HttpResponse { + if payload.script.is_empty() { + return HttpResponse::Json(JsonResponse::with_error(Some("Invalid script".to_string()))); + } + + HttpResponse::Json( + JsonResponse::from_output(query_handler.execute_script(&payload.script).await).await, + ) +} diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs index 0c469c37b669..d42862dc825b 100644 --- a/src/servers/src/query_handler.rs +++ b/src/servers/src/query_handler.rs @@ -22,6 +22,7 @@ pub type GrpcQueryHandlerRef = Arc<dyn GrpcQueryHandler + Send + Sync>; #[async_trait] pub trait SqlQueryHandler { async fn do_query(&self, query: &str) -> Result<Output>; + async fn execute_script(&self, script: &str) -> Result<Output>; } #[async_trait] diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs index 22c30b81fb1b..5ba52ad2e5a0 100644 --- a/src/servers/tests/http/http_handler_test.rs +++ b/src/servers/tests/http/http_handler_test.rs @@ -1,10 +1,11 @@ use std::collections::HashMap; -use axum::extract::Query; +use axum::extract::{Json, Query}; use axum::Extension; use common_telemetry::metric; use metrics::counter; use servers::http::handler as http_handler; +use servers::http::handler::ScriptExecution; use servers::http::{HttpResponse, JsonOutput}; use test_util::MemTable; @@ -70,6 +71,41 @@ async fn test_metrics() { } } +#[tokio::test] +async fn test_scripts() { + common_telemetry::init_default_ut_logging(); + + let exec = 
create_script_payload(); + let query_handler = create_testing_sql_query_handler(MemTable::default_numbers_table()); + let extension = Extension(query_handler); + + let json = http_handler::scripts(extension, exec).await; + match json { + HttpResponse::Json(json) => { + assert!(json.success(), "{:?}", json); + assert!(json.error().is_none()); + match json.output().expect("assertion failed") { + JsonOutput::Rows(rows) => { + assert_eq!(1, rows.len()); + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } +} + +fn create_script_payload() -> Json<ScriptExecution> { + Json(ScriptExecution { + script: r#" +@copr(sql='select uint32s as number from numbers', args=['number'], returns=['n']) +def test(n): + return n; +"# + .to_string(), + }) +} + fn create_query() -> Query<HashMap<String, String>> { Query(HashMap::from([( "sql".to_string(), diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs index cca5e8662836..c4e42a53ead6 100644 --- a/src/servers/tests/mod.rs +++ b/src/servers/tests/mod.rs @@ -12,9 +12,23 @@ use test_util::MemTable; mod http; mod mysql; +use script::{ + engine::{CompileContext, EvalContext, Script, ScriptEngine}, + python::PyEngine, +}; struct DummyInstance { query_engine: QueryEngineRef, + py_engine: Arc<PyEngine>, +} + +impl DummyInstance { + fn new(query_engine: QueryEngineRef) -> Self { + Self { + py_engine: Arc::new(PyEngine::new(query_engine.clone())), + query_engine, + } + } } #[async_trait] @@ -23,6 +37,15 @@ impl SqlQueryHandler for DummyInstance { let plan = self.query_engine.sql_to_plan(query).unwrap(); Ok(self.query_engine.execute(&plan).await.unwrap()) } + async fn execute_script(&self, script: &str) -> Result<Output> { + let py_script = self + .py_engine + .compile(script, CompileContext::default()) + .await + .unwrap(); + + Ok(py_script.evaluate(EvalContext::default()).await.unwrap()) + } } fn create_testing_sql_query_handler(table: MemTable) -> SqlQueryHandlerRef { @@ -38,5 +61,5 @@ fn create_testing_sql_query_handler(table: MemTable) -> SqlQueryHandlerRef { let factory = QueryEngineFactory::new(catalog_list); let query_engine = factory.query_engine().clone(); - Arc::new(DummyInstance { query_engine }) + Arc::new(DummyInstance::new(query_engine)) } diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs index 4f36bfa447a4..8ddbe00c079d 100644 --- a/src/storage/src/region.rs +++ b/src/storage/src/region.rs @@ -322,7 +322,7 @@ struct RegionInner<S: LogStore> { impl<S: LogStore> RegionInner<S> { #[inline] fn version_control(&self) -> &VersionControl { - self.shared.version_control.as_ref() + &self.shared.version_control } fn in_memory_metadata(&self) -> RegionMetaImpl {
feat
script engine and python impl (#219)
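(Illustrative note, not part of the recorded commit.) The record above introduces a `ScriptEngine` trait with a Python implementation and wires it into the servers crate. Below is a minimal sketch of how a caller drives that API, mirroring the `execute_script` test helper in the diff; the `run_python` function name, the `query::QueryEngineRef` import path, and the use of `unwrap` for brevity are assumptions rather than part of the commit.

use query::QueryEngineRef; // assumed import path for the query engine handle
use script::engine::{CompileContext, EvalContext, Script, ScriptEngine};
use script::python::PyEngine;

/// Hypothetical wrapper: compile an @copr-decorated Python script and run it.
async fn run_python(query_engine: QueryEngineRef, source: &str) {
    // The Python engine wraps an existing query engine, as in DummyInstance::new.
    let py_engine = PyEngine::new(query_engine);

    // Compile once, then evaluate the compiled script to an Output.
    let compiled = py_engine
        .compile(source, CompileContext::default())
        .await
        .unwrap();
    let _output = compiled.evaluate(EvalContext::default()).await.unwrap();
}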
5b315c2d4052b2c0b8ad744df0e08d080d09fd5a
2024-03-20 02:06:51
WU Jingdi
feat: support multi params in promql range function macro (#3464)
false
diff --git a/src/common/macro/src/range_fn.rs b/src/common/macro/src/range_fn.rs index 622e21ef6c73..c907f1d0d165 100644 --- a/src/common/macro/src/range_fn.rs +++ b/src/common/macro/src/range_fn.rs @@ -56,6 +56,18 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt } = &sig; let arg_types = ok!(extract_input_types(inputs)); + // with format like Float64Array + let array_types = arg_types + .iter() + .map(|ty| { + if let Type::Reference(TypeReference { elem, .. }) = ty { + elem.as_ref().clone() + } else { + ty.clone() + } + }) + .collect::<Vec<_>>(); + // build the struct and its impl block // only do this when `display_name` is specified if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) { @@ -64,6 +76,8 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt vis, ok!(get_ident(&arg_map, "name", arg_span)), display_name, + array_types, + ok!(get_ident(&arg_map, "ret", arg_span)), ); result.extend(struct_code); } @@ -90,6 +104,8 @@ fn build_struct( vis: Visibility, name: Ident, display_name_ident: Ident, + array_types: Vec<Type>, + return_array_type: Ident, ) -> TokenStream { let display_name = display_name_ident.to_string(); quote! { @@ -114,18 +130,12 @@ fn build_struct( } } - // TODO(ruihang): this should be parameterized - // time index column and value column fn input_type() -> Vec<DataType> { - vec![ - RangeArray::convert_data_type(DataType::Timestamp(TimeUnit::Millisecond, None)), - RangeArray::convert_data_type(DataType::Float64), - ] + vec![#( RangeArray::convert_data_type(#array_types::new_null(0).data_type().clone()), )*] } - // TODO(ruihang): this should be parameterized fn return_type() -> DataType { - DataType::Float64 + #return_array_type::new_null(0).data_type().clone() } } } @@ -160,6 +170,7 @@ fn build_calc_fn( .map(|name| Ident::new(&format!("{}_range_array", name), name.span())) .collect::<Vec<_>>(); let first_range_array_name = range_array_names.first().unwrap().clone(); + let first_param_name = param_names.first().unwrap().clone(); quote! 
{ impl #name { @@ -168,13 +179,29 @@ fn build_calc_fn( #( let #range_array_names = RangeArray::try_new(extract_array(&input[#param_numbers])?.to_data().into())?; )* - // TODO(ruihang): add ensure!() + // check arrays len + { + let len_first = #first_range_array_name.len(); + #( + if len_first != #range_array_names.len() { + return Err(DataFusionError::Execution(format!("RangeArray have different lengths in PromQL function {}: array1={}, array2={}", #name::name(), len_first, #range_array_names.len()))); + } + )* + } let mut result_array = Vec::new(); for index in 0..#first_range_array_name.len(){ #( let #param_names = #range_array_names.get(index).unwrap().as_any().downcast_ref::<#unref_param_types>().unwrap().clone(); )* - // TODO(ruihang): add ensure!() to check length + // check element len + { + let len_first = #first_param_name.len(); + #( + if len_first != #param_names.len() { + return Err(DataFusionError::Execution(format!("RangeArray's element {} have different lengths in PromQL function {}: array1={}, array2={}", index, #name::name(), len_first, #param_names.len()))); + } + )* + } let result = #fn_name(#( &#param_names, )*); result_array.push(result); diff --git a/src/promql/src/functions/aggr_over_time.rs b/src/promql/src/functions/aggr_over_time.rs index f57b2d561256..e02e4a7d910b 100644 --- a/src/promql/src/functions/aggr_over_time.rs +++ b/src/promql/src/functions/aggr_over_time.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use common_macro::range_fn; use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray}; -use datafusion::arrow::datatypes::TimeUnit; use datafusion::common::DataFusionError; use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility}; use datafusion::physical_plan::ColumnarValue; diff --git a/src/promql/src/functions/changes.rs b/src/promql/src/functions/changes.rs index a8b29c9cbdac..bb547e87f1ba 100644 --- a/src/promql/src/functions/changes.rs +++ b/src/promql/src/functions/changes.rs @@ -19,7 +19,6 @@ use std::sync::Arc; use common_macro::range_fn; use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray}; -use datafusion::arrow::datatypes::TimeUnit; use datafusion::common::DataFusionError; use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility}; use datafusion::physical_plan::ColumnarValue; diff --git a/src/promql/src/functions/deriv.rs b/src/promql/src/functions/deriv.rs index e573242e8286..462637ceb5aa 100644 --- a/src/promql/src/functions/deriv.rs +++ b/src/promql/src/functions/deriv.rs @@ -19,7 +19,6 @@ use std::sync::Arc; use common_macro::range_fn; use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray}; -use datafusion::arrow::datatypes::TimeUnit; use datafusion::common::DataFusionError; use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility}; use datafusion::physical_plan::ColumnarValue; diff --git a/src/promql/src/functions/resets.rs b/src/promql/src/functions/resets.rs index 218e1908738a..00dec32d019e 100644 --- a/src/promql/src/functions/resets.rs +++ b/src/promql/src/functions/resets.rs @@ -19,7 +19,6 @@ use std::sync::Arc; use common_macro::range_fn; use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray}; -use datafusion::arrow::datatypes::TimeUnit; use datafusion::common::DataFusionError; use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility}; use datafusion::physical_plan::ColumnarValue;
feat
support multi params in promql range function macro (#3464)
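(Illustrative note, not part of the recorded commit.) The record above makes the macro derive `input_type`/`return_type` from the annotated parameter array types and the `ret` attribute, and adds length checks to the generated `calc`. Below is a standalone sketch of that equal-length guard, reusing the error text from the generated code; `ensure_same_length` is a made-up helper name, and the expanded macro inlines this check per array rather than calling a function.

use datafusion::common::DataFusionError;

/// Reject inputs whose range arrays (or window elements) have different lengths.
fn ensure_same_length(func: &str, lens: &[usize]) -> Result<(), DataFusionError> {
    if let Some((&first, rest)) = lens.split_first() {
        for &len in rest {
            if len != first {
                return Err(DataFusionError::Execution(format!(
                    "RangeArray have different lengths in PromQL function {func}: array1={first}, array2={len}"
                )));
            }
        }
    }
    Ok(())
}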
8a0054aa898f890df1cb92619a0b0a18caa03560
2023-10-13 13:46:26
Yingwen
fix: make nyc-taxi bench work again (#2599)
false
diff --git a/Cargo.lock b/Cargo.lock index 2730af59070d..95a0d66cc619 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -842,8 +842,10 @@ name = "benchmarks" version = "0.4.0" dependencies = [ "arrow", + "chrono", "clap 4.4.1", "client", + "futures-util", "indicatif", "itertools 0.10.5", "parquet", diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml index 3bbe8dc86140..dca955714470 100644 --- a/benchmarks/Cargo.toml +++ b/benchmarks/Cargo.toml @@ -6,8 +6,10 @@ license.workspace = true [dependencies] arrow.workspace = true +chrono.workspace = true clap = { version = "4.0", features = ["derive"] } client = { workspace = true } +futures-util.workspace = true indicatif = "0.17.1" itertools.workspace = true parquet.workspace = true diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs index 5f14c655512b..b30989625f73 100644 --- a/benchmarks/src/bin/nyc-taxi.rs +++ b/benchmarks/src/bin/nyc-taxi.rs @@ -29,14 +29,14 @@ use client::api::v1::column::Values; use client::api::v1::{ Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType, }; -use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use futures_util::TryStreamExt; use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder; use tokio::task::JoinSet; const CATALOG_NAME: &str = "greptime"; const SCHEMA_NAME: &str = "public"; -const TABLE_NAME: &str = "nyc_taxi"; #[derive(Parser)] #[command(name = "NYC benchmark runner")] @@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> { .collect() } +fn new_table_name() -> String { + format!("nyc_taxi_{}", chrono::Utc::now().timestamp()) +} + async fn write_data( + table_name: &str, batch_size: usize, db: &Database, path: PathBuf, @@ -104,7 +109,7 @@ async fn write_data( } let (columns, row_count) = convert_record_batch(record_batch); let request = InsertRequest { - table_name: TABLE_NAME.to_string(), + table_name: table_name.to_string(), columns, row_count, }; @@ -113,7 +118,7 @@ async fn write_data( }; let now = Instant::now(); - let _ = db.insert(requests).await.unwrap(); + db.insert(requests).await.unwrap(); let elapsed = now.elapsed(); total_rpc_elapsed_ms += elapsed.as_millis(); progress_bar.inc(row_count as _); @@ -131,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) { for (array, field) in record_batch.columns().iter().zip(fields.iter()) { let (values, datatype) = build_values(array); + let semantic_type = match field.name().as_str() { + "VendorID" => SemanticType::Tag, + "tpep_pickup_datetime" => SemanticType::Timestamp, + _ => SemanticType::Field, + }; let column = Column { column_name: field.name().clone(), @@ -141,8 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) { .map(|bitmap| bitmap.buffer().as_slice().to_vec()) .unwrap_or_default(), datatype: datatype.into(), - // datatype and semantic_type are set to default - ..Default::default() + semantic_type: semantic_type as i32, }; columns.push(column); } @@ -243,11 +252,11 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool { batch.columns().iter().all(|col| col.null_count() == 0) } -fn create_table_expr() -> CreateTableExpr { +fn create_table_expr(table_name: &str) -> CreateTableExpr { CreateTableExpr { catalog_name: CATALOG_NAME.to_string(), schema_name: SCHEMA_NAME.to_string(), - table_name: 
TABLE_NAME.to_string(), + table_name: table_name.to_string(), desc: "".to_string(), column_defs: vec![ ColumnDef { @@ -261,7 +270,7 @@ fn create_table_expr() -> CreateTableExpr { ColumnDef { name: "tpep_pickup_datetime".to_string(), data_type: ColumnDataType::TimestampMicrosecond as i32, - is_nullable: true, + is_nullable: false, default_constraint: vec![], semantic_type: SemanticType::Timestamp as i32, comment: String::new(), @@ -405,31 +414,31 @@ fn create_table_expr() -> CreateTableExpr { ], time_index: "tpep_pickup_datetime".to_string(), primary_keys: vec!["VendorID".to_string()], - create_if_not_exists: false, + create_if_not_exists: true, table_options: Default::default(), table_id: None, engine: "mito".to_string(), } } -fn query_set() -> HashMap<String, String> { +fn query_set(table_name: &str) -> HashMap<String, String> { HashMap::from([ ( "count_all".to_string(), - format!("SELECT COUNT(*) FROM {TABLE_NAME};"), + format!("SELECT COUNT(*) FROM {table_name};"), ), ( "fare_amt_by_passenger".to_string(), - format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"), + format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"), ) ]) } -async fn do_write(args: &Args, db: &Database) { +async fn do_write(args: &Args, db: &Database, table_name: &str) { let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument")); let mut write_jobs = JoinSet::new(); - let create_table_result = db.create(create_table_expr()).await; + let create_table_result = db.create(create_table_expr(table_name)).await; println!("Create table result: {create_table_result:?}"); let progress_bar_style = ProgressStyle::with_template( @@ -447,8 +456,10 @@ async fn do_write(args: &Args, db: &Database) { let db = db.clone(); let mpb = multi_progress_bar.clone(); let pb_style = progress_bar_style.clone(); - let _ = write_jobs - .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await }); + let table_name = table_name.to_string(); + let _ = write_jobs.spawn(async move { + write_data(&table_name, batch_size, &db, path, mpb, pb_style).await + }); } } while write_jobs.join_next().await.is_some() { @@ -457,24 +468,32 @@ async fn do_write(args: &Args, db: &Database) { let db = db.clone(); let mpb = multi_progress_bar.clone(); let pb_style = progress_bar_style.clone(); - let _ = write_jobs - .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await }); + let table_name = table_name.to_string(); + let _ = write_jobs.spawn(async move { + write_data(&table_name, batch_size, &db, path, mpb, pb_style).await + }); } } } -async fn do_query(num_iter: usize, db: &Database) { - for (query_name, query) in query_set() { +async fn do_query(num_iter: usize, db: &Database, table_name: &str) { + for (query_name, query) in query_set(table_name) { println!("Running query: {query}"); for i in 0..num_iter { let now = Instant::now(); - let _res = db.sql(&query).await.unwrap(); + let res = db.sql(&query).await.unwrap(); + match res { + Output::AffectedRows(_) | Output::RecordBatches(_) => (), + Output::Stream(stream) => { + stream.try_collect::<Vec<_>>().await.unwrap(); + } + } let elapsed = now.elapsed(); println!( "query {}, iteration {}: {}ms", query_name, i, - elapsed.as_millis() + elapsed.as_millis(), ); } } @@ -491,13 +510,14 @@ fn main() { .block_on(async { let client = Client::with_urls(vec![&args.endpoint]); let db = 
Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client); + let table_name = new_table_name(); if !args.skip_write { - do_write(&args, &db).await; + do_write(&args, &db, &table_name).await; } if !args.skip_read { - do_query(args.iter_num, &db).await; + do_query(args.iter_num, &db, &table_name).await; } }) } diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs index 23a67ebae1bd..7f8330f68902 100644 --- a/src/client/src/lib.rs +++ b/src/client/src/lib.rs @@ -26,6 +26,8 @@ use api::v1::greptime_response::Response; use api::v1::{AffectedRows, GreptimeResponse}; pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_error::status_code::StatusCode; +pub use common_query::Output; +pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream}; use snafu::OptionExt; pub use self::client::Client;
fix
make nyc-taxi bench work again (#2599)
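The benchmark change above stops the timer only after draining `Output::Stream`, because a lazy record-batch stream does most of its work while it is polled, so `db.sql()` returning quickly says little about query cost. A minimal sketch of that timing pattern, assuming only the `futures` and `tokio` crates; the integer stream is a stand-in for a record-batch stream and none of these names come from the benchmark itself.

use std::time::Instant;

use futures::stream::{self, TryStreamExt};

#[tokio::main]
async fn main() {
    // Stand-in for a SendableRecordBatchStream: a stream of fallible "batches".
    let batches = stream::iter(vec![Ok::<u64, std::io::Error>(1), Ok(2), Ok(3)]);

    let now = Instant::now();
    // Draining the stream is what actually executes the query, so it must be
    // inside the timed region.
    let collected: Vec<u64> = batches.try_collect().await.unwrap();
    println!("collected {} batches in {:?}", collected.len(), now.elapsed());
}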
f889ed548800d3b85b9b02d69e361491991f8164
2022-05-05 13:58:38
Lei, Huang
feat: INSERT statement and planner implementation (#16)
false
diff --git a/Cargo.lock b/Cargo.lock index 3f965e195c91..c24be666b5a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -402,7 +402,7 @@ dependencies = [ "pin-project-lite", "rand", "smallvec", - "sqlparser 0.15.0", + "sqlparser", "tempfile", "tokio", "tokio-stream", @@ -416,7 +416,7 @@ dependencies = [ "arrow2", "ordered-float 2.10.0", "parquet2", - "sqlparser 0.15.0", + "sqlparser", ] [[package]] @@ -427,7 +427,7 @@ dependencies = [ "ahash", "arrow2", "datafusion-common", - "sqlparser 0.15.0", + "sqlparser", ] [[package]] @@ -1060,6 +1060,7 @@ dependencies = [ "futures", "futures-util", "snafu", + "sql", "table", "tokio", "tokio-stream", @@ -1256,9 +1257,8 @@ checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451" name = "sql" version = "0.1.0" dependencies = [ - "query", "snafu", - "sqlparser 0.16.0", + "sqlparser", ] [[package]] @@ -1270,15 +1270,6 @@ dependencies = [ "log", ] -[[package]] -name = "sqlparser" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9a527b68048eb95495a1508f6c8395c8defcff5ecdbe8ad4106d08a2ef2a3c" -dependencies = [ - "log", -] - [[package]] name = "static_assertions" version = "1.1.0" diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml index 31f608e9228e..9bac7f20f387 100644 --- a/src/query/Cargo.toml +++ b/src/query/Cargo.toml @@ -18,6 +18,7 @@ futures-util = "0.3.21" snafu = "0.7.0" table = { path = "../table" } tokio = "1.0" +sql = { path = "../sql" } [dev-dependencies] tokio = { version = "1.0", features = ["macros", "rt", "rt-multi-thread", "sync", "fs", "parking_lot"] } diff --git a/src/query/src/error.rs b/src/query/src/error.rs index 8cda8b87cbda..e25b224dc669 100644 --- a/src/query/src/error.rs +++ b/src/query/src/error.rs @@ -21,3 +21,13 @@ impl From<Error> for DataFusionError { DataFusionError::External(Box::new(e)) } } + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum PlannerError { + #[snafu(display("Cannot plan SQL: {}, source: {}", sql, source))] + DfPlan { + sql: String, + source: DataFusionError, + }, +} diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs index 505028de67a6..a9fc6cee1e45 100644 --- a/src/query/src/lib.rs +++ b/src/query/src/lib.rs @@ -6,4 +6,5 @@ pub mod logical_optimizer; pub mod physical_optimizer; pub mod physical_planner; pub mod plan; +pub mod planner; pub mod query_engine; diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs new file mode 100644 index 000000000000..e276fe3da7b5 --- /dev/null +++ b/src/query/src/planner.rs @@ -0,0 +1,56 @@ +use datafusion::sql::planner::{ContextProvider, SqlToRel}; +use snafu::ResultExt; +use sql::statements::query::Query; +use sql::statements::statement::Statement; + +use crate::error; +use crate::error::PlannerError; +use crate::plan::LogicalPlan; + +pub trait Planner { + fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan>; +} + +type Result<T> = std::result::Result<T, PlannerError>; + +pub struct DfPlanner<'a, S: ContextProvider> { + sql_to_rel: SqlToRel<'a, S>, +} + +impl<'a, S: ContextProvider> DfPlanner<'a, S> { + /// Creates a DataFusion planner instance + pub fn new(schema_provider: &'a S) -> Self { + let rel = SqlToRel::new(schema_provider); + Self { sql_to_rel: rel } + } + + /// Converts QUERY statement to logical plan. 
+ pub fn query_to_plan(&self, query: Box<Query>) -> Result<LogicalPlan> { + // todo(hl): original SQL should be provided as an argument + let sql = query.inner.to_string(); + let result = self + .sql_to_rel + .query_to_plan(query.inner) + .context(error::DfPlanSnafu { sql })?; + + Ok(LogicalPlan::DfPlan(result)) + } +} + +impl<'a, S> Planner for DfPlanner<'a, S> +where + S: ContextProvider, +{ + /// Converts statement to logical plan using datafusion planner + fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> { + match statement { + Statement::ShowDatabases(_) => { + todo!("Currently not supported") + } + Statement::Query(qb) => self.query_to_plan(qb), + Statement::Insert(_) => { + todo!() + } + } + } +} diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index ef5fd34bfdd4..88fa896ae447 100644 --- a/src/query/src/query_engine/state.rs +++ b/src/query/src/query_engine/state.rs @@ -20,10 +20,14 @@ use crate::catalog::{schema::SchemaProvider, CatalogList, CatalogProvider}; use crate::error::{self, Result}; use crate::executor::Runtime; +const DEFAULT_CATALOG_NAME: &str = "greptime"; +const DEFAULT_SCHEMA_NAME: &str = "public"; + /// Query engine global state #[derive(Clone)] pub struct QueryEngineState { df_context: ExecutionContext, + catalog_list: Arc<dyn CatalogList>, } impl fmt::Debug for QueryEngineState { @@ -35,7 +39,8 @@ impl fmt::Debug for QueryEngineState { impl QueryEngineState { pub(crate) fn new(catalog_list: Arc<dyn CatalogList>) -> Self { - let config = ExecutionConfig::new().with_default_catalog_and_schema("greptime", "public"); + let config = ExecutionConfig::new() + .with_default_catalog_and_schema(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME); let df_context = ExecutionContext::with_config(config); df_context.state.lock().catalog_list = Arc::new(DfCatalogListAdapter { @@ -43,7 +48,10 @@ impl QueryEngineState { runtime: df_context.runtime_env(), }); - Self { df_context } + Self { + df_context, + catalog_list, + } } #[inline] @@ -55,6 +63,13 @@ impl QueryEngineState { pub(crate) fn runtime(&self) -> Runtime { self.df_context.runtime_env().into() } + + #[allow(dead_code)] + pub(crate) fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> { + self.catalog_list + .catalog(DEFAULT_CATALOG_NAME) + .and_then(|c| c.schema(schema_name)) + } } /// Adapters between datafusion and greptime query engine. 
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml index 94bf7505d05f..b9b763e39867 100644 --- a/src/sql/Cargo.toml +++ b/src/sql/Cargo.toml @@ -6,6 +6,5 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -query = { path = "../query" } snafu = "0.7.0" -sqlparser = "0.16.0" +sqlparser = "0.15.0" diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs index 3081a847dcf6..2ff323e9749c 100644 --- a/src/sql/src/lib.rs +++ b/src/sql/src/lib.rs @@ -7,5 +7,4 @@ pub mod dialect; pub mod errors; pub mod parser; pub mod parsers; -pub mod planner; pub mod statements; diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs index 4f61c600c0e3..5bb5f2eaf752 100644 --- a/src/sql/src/parser.rs +++ b/src/sql/src/parser.rs @@ -5,9 +5,11 @@ use sqlparser::parser::Parser; use sqlparser::tokenizer::{Token, Tokenizer}; use crate::errors; +use crate::statements::show_database::SqlShowDatabase; use crate::statements::show_kind::ShowKind; use crate::statements::statement::Statement; -use crate::statements::statement_show_database::SqlShowDatabase; + +pub type Result<T> = std::result::Result<T, errors::ParserError>; /// GrepTime SQL parser context, a simple wrapper for Datafusion SQL parser. pub struct ParserContext<'a> { @@ -17,10 +19,7 @@ pub struct ParserContext<'a> { impl<'a> ParserContext<'a> { /// Parses SQL with given dialect - pub fn create_with_dialect( - sql: &'a str, - dialect: &dyn Dialect, - ) -> Result<Vec<Statement>, errors::ParserError> { + pub fn create_with_dialect(sql: &'a str, dialect: &dyn Dialect) -> Result<Vec<Statement>> { let mut stmts: Vec<Statement> = Vec::new(); let mut tokenizer = Tokenizer::new(dialect, sql); @@ -54,7 +53,7 @@ impl<'a> ParserContext<'a> { } /// Parses parser context to a set of statements. - pub fn parse_statement(&mut self) -> Result<Statement, errors::ParserError> { + pub fn parse_statement(&mut self) -> Result<Statement> { match self.parser.peek_token() { Token::Word(w) => { match w.keyword { @@ -87,7 +86,7 @@ impl<'a> ParserContext<'a> { } /// Raises an "unsupported statement" error. - pub fn unsupported<T>(&self, keyword: String) -> Result<T, errors::ParserError> { + pub fn unsupported<T>(&self, keyword: String) -> Result<T> { Err(errors::ParserError::Unsupported { sql: self.sql.to_string(), keyword, @@ -96,7 +95,7 @@ impl<'a> ParserContext<'a> { /// Parses SHOW statements /// todo(hl) support `show table`/`show settings`/`show create`/`show users` ect. - fn parse_show(&mut self) -> Result<Statement, errors::ParserError> { + fn parse_show(&mut self) -> Result<Statement> { if self.consume_token("DATABASES") || self.consume_token("SCHEMAS") { Ok(self.parse_show_databases()?) } else { @@ -104,15 +103,11 @@ impl<'a> ParserContext<'a> { } } - fn parse_explain(&mut self) -> Result<Statement, errors::ParserError> { - todo!() - } - - fn parse_insert(&mut self) -> Result<Statement, errors::ParserError> { + fn parse_explain(&mut self) -> Result<Statement> { todo!() } - fn parse_create(&mut self) -> Result<Statement, errors::ParserError> { + fn parse_create(&mut self) -> Result<Statement> { todo!() } @@ -131,7 +126,7 @@ impl<'a> ParserContext<'a> { } /// Parses `SHOW DATABASES` statement. 
- pub fn parse_show_databases(&mut self) -> Result<Statement, errors::ParserError> { + pub fn parse_show_databases(&mut self) -> Result<Statement> { let tok = self.parser.next_token(); match &tok { Token::EOF | Token::SemiColon => Ok(Statement::ShowDatabases(SqlShowDatabase::new( diff --git a/src/sql/src/parsers.rs b/src/sql/src/parsers.rs index 9d5260780c95..d290d588189d 100644 --- a/src/sql/src/parsers.rs +++ b/src/sql/src/parsers.rs @@ -1 +1,2 @@ +pub(crate) mod insert_parser; pub(crate) mod query_parser; diff --git a/src/sql/src/parsers/insert_parser.rs b/src/sql/src/parsers/insert_parser.rs new file mode 100644 index 000000000000..84af1fa78e4d --- /dev/null +++ b/src/sql/src/parsers/insert_parser.rs @@ -0,0 +1,30 @@ +use snafu::ResultExt; +use sqlparser::ast::Statement as SpStatement; + +use crate::errors; +use crate::parser::ParserContext; +use crate::parser::Result; +use crate::statements::insert::Insert; +use crate::statements::statement::Statement; + +/// INSERT statement parser implementation +impl<'a> ParserContext<'a> { + pub(crate) fn parse_insert(&mut self) -> Result<Statement> { + self.parser.next_token(); + let spstatement = self + .parser + .parse_insert() + .context(errors::InnerSnafu { sql: self.sql })?; + + match spstatement { + SpStatement::Insert { .. } => { + Ok(Statement::Insert(Box::new(Insert { inner: spstatement }))) + } + unexp => errors::UnsupportedSnafu { + sql: self.sql.to_string(), + keyword: unexp.to_string(), + } + .fail(), + } + } +} diff --git a/src/sql/src/parsers/query_parser.rs b/src/sql/src/parsers/query_parser.rs index 38d6887519a9..adcc4ab302fb 100644 --- a/src/sql/src/parsers/query_parser.rs +++ b/src/sql/src/parsers/query_parser.rs @@ -1,14 +1,14 @@ -use errors::ParserError; use snafu::prelude::*; use crate::errors; use crate::parser::ParserContext; +use crate::parser::Result; +use crate::statements::query::Query; use crate::statements::statement::Statement; -use crate::statements::statement_query::Query; impl<'a> ParserContext<'a> { /// Parses select and it's variants. 
- pub(crate) fn parse_query(&mut self) -> Result<Statement, ParserError> { + pub(crate) fn parse_query(&mut self) -> Result<Statement> { let spquery = self .parser .parse_query() @@ -45,6 +45,9 @@ mod tests { Statement::ShowDatabases(_) => { panic!("Not expected to be a show database statement") } + Statement::Insert(_) => { + panic!("Not expected to be a show database statement") + } Statement::Query(_) => {} } } diff --git a/src/sql/src/planner.rs b/src/sql/src/planner.rs deleted file mode 100644 index 8b137891791f..000000000000 --- a/src/sql/src/planner.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs index 895ca9bfeef9..eea41d1e380d 100644 --- a/src/sql/src/statements.rs +++ b/src/sql/src/statements.rs @@ -1,4 +1,5 @@ +pub mod insert; +pub mod query; +pub mod show_database; pub mod show_kind; pub mod statement; -pub mod statement_query; -pub mod statement_show_database; diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs new file mode 100644 index 000000000000..afd8c05f04ea --- /dev/null +++ b/src/sql/src/statements/insert.rs @@ -0,0 +1,22 @@ +use sqlparser::ast::Statement; +use sqlparser::parser::ParserError; + +#[derive(Debug, Clone, PartialEq)] +pub struct Insert { + // Can only be sqlparser::ast::Statement::Insert variant + pub inner: Statement, +} + +impl TryFrom<Statement> for Insert { + type Error = ParserError; + + fn try_from(value: Statement) -> Result<Self, Self::Error> { + match value { + Statement::Insert { .. } => Ok(Insert { inner: value }), + unexp => Err(ParserError::ParserError(format!( + "Not expected to be {}", + unexp + ))), + } + } +} diff --git a/src/sql/src/statements/query.rs b/src/sql/src/statements/query.rs new file mode 100644 index 000000000000..ec4315b91aba --- /dev/null +++ b/src/sql/src/statements/query.rs @@ -0,0 +1,26 @@ +use sqlparser::ast::Query as SpQuery; + +use crate::errors::ParserError; + +/// Query statement instance. +#[derive(Debug, Clone, PartialEq)] +pub struct Query { + pub inner: SpQuery, +} + +/// Automatically converts from sqlparser Query instance to SqlQuery. +impl TryFrom<SpQuery> for Query { + type Error = ParserError; + + fn try_from(q: SpQuery) -> Result<Self, Self::Error> { + Ok(Query { inner: q }) + } +} + +impl TryFrom<Query> for SpQuery { + type Error = ParserError; + + fn try_from(value: Query) -> Result<Self, Self::Error> { + Ok(value.inner) + } +} diff --git a/src/sql/src/statements/statement_show_database.rs b/src/sql/src/statements/show_database.rs similarity index 100% rename from src/sql/src/statements/statement_show_database.rs rename to src/sql/src/statements/show_database.rs diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs index 9b62092200b4..0a15c4eb4d31 100644 --- a/src/sql/src/statements/statement.rs +++ b/src/sql/src/statements/statement.rs @@ -1,8 +1,9 @@ use sqlparser::ast::Statement as SpStatement; -use sqlparser::parser::ParserError::ParserError; +use sqlparser::parser::ParserError; -use crate::statements::statement_query::Query; -use crate::statements::statement_show_database::SqlShowDatabase; +use crate::statements::insert::Insert; +use crate::statements::query::Query; +use crate::statements::show_database::SqlShowDatabase; /// Tokens parsed by `DFParser` are converted into these values. 
#[derive(Debug, Clone, PartialEq)] @@ -12,6 +13,9 @@ pub enum Statement { // Query Query(Box<Query>), + + // Insert + Insert(Box<Insert>), } /// Converts Statement to sqlparser statement @@ -20,10 +24,11 @@ impl TryFrom<Statement> for SpStatement { fn try_from(value: Statement) -> Result<Self, Self::Error> { match value { - Statement::ShowDatabases(_) => Err(ParserError( + Statement::ShowDatabases(_) => Err(ParserError::ParserError( "sqlparser does not support SHOW DATABASE query.".to_string(), )), Statement::Query(s) => Ok(SpStatement::Query(Box::new(s.inner))), + Statement::Insert(i) => Ok(i.inner), } } }
feat
INSERT statement and planner implementation (#16)
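The commit above wraps the sqlparser AST in narrow statement types such as `Insert`, whose `TryFrom` only accepts the matching variant so the rest of the planner can rely on that invariant. Below is a minimal, self-contained sketch of the same newtype-guard pattern; the `SpStatement` enum here is a hypothetical stand-in for the real sqlparser type, not part of the commit.

#[derive(Debug)]
enum SpStatement {
    Insert { table: String },
    Query(String),
}

#[derive(Debug)]
struct Insert {
    // Invariant: always the `Insert` variant of the wider enum.
    inner: SpStatement,
}

impl TryFrom<SpStatement> for Insert {
    type Error = String;

    fn try_from(value: SpStatement) -> Result<Self, Self::Error> {
        match value {
            SpStatement::Insert { .. } => Ok(Insert { inner: value }),
            other => Err(format!("not an INSERT statement: {:?}", other)),
        }
    }
}

fn main() {
    let ok = Insert::try_from(SpStatement::Insert { table: "t".to_string() });
    println!("{:?}", ok);
    let err = Insert::try_from(SpStatement::Query("SELECT 1".to_string()));
    assert!(err.is_err());
}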
7b28da277d909bbf5f1beb70baef5d2714ee7b10
2024-07-16 09:17:41
Lei, HUANG
refactor: LastRowReader to use LastRowSelector (#4374)
false
diff --git a/src/mito2/src/read/last_row.rs b/src/mito2/src/read/last_row.rs index 85f82760610e..f91f20b4f848 100644 --- a/src/mito2/src/read/last_row.rs +++ b/src/mito2/src/read/last_row.rs @@ -30,7 +30,7 @@ pub(crate) struct LastRowReader { /// Inner reader. reader: BoxedBatchReader, /// The last batch pending to return. - last_batch: Option<Batch>, + selector: LastRowSelector, } impl LastRowReader { @@ -38,37 +38,18 @@ impl LastRowReader { pub(crate) fn new(reader: BoxedBatchReader) -> Self { Self { reader, - last_batch: None, + selector: LastRowSelector::default(), } } /// Returns the last row of the next key. pub(crate) async fn next_last_row(&mut self) -> Result<Option<Batch>> { while let Some(batch) = self.reader.next_batch().await? { - if let Some(last) = &self.last_batch { - if last.primary_key() == batch.primary_key() { - // Same key, update last batch. - self.last_batch = Some(batch); - } else { - // Different key, return the last row in `last` and update `last_batch` by - // current batch. - debug_assert!(!last.is_empty()); - let last_row = last.slice(last.num_rows() - 1, 1); - self.last_batch = Some(batch); - return Ok(Some(last_row)); - } - } else { - self.last_batch = Some(batch); + if let Some(yielded) = self.selector.on_next(batch) { + return Ok(Some(yielded)); } } - - if let Some(last) = self.last_batch.take() { - // This is the last key. - let last_row = last.slice(last.num_rows() - 1, 1); - return Ok(Some(last_row)); - } - - Ok(None) + Ok(self.selector.finish()) } } @@ -79,6 +60,45 @@ impl BatchReader for LastRowReader { } } +/// Common struct that selects only the last row of each time series. +#[derive(Default)] +pub struct LastRowSelector { + last_batch: Option<Batch>, +} + +impl LastRowSelector { + /// Handles next batch. Return the yielding batch if present. + pub fn on_next(&mut self, batch: Batch) -> Option<Batch> { + if let Some(last) = &self.last_batch { + if last.primary_key() == batch.primary_key() { + // Same key, update last batch. + self.last_batch = Some(batch); + None + } else { + // Different key, return the last row in `last` and update `last_batch` by + // current batch. + debug_assert!(!last.is_empty()); + let last_row = last.slice(last.num_rows() - 1, 1); + self.last_batch = Some(batch); + Some(last_row) + } + } else { + self.last_batch = Some(batch); + None + } + } + + /// Finishes the selector and returns the pending batch if any. + pub fn finish(&mut self) -> Option<Batch> { + if let Some(last) = self.last_batch.take() { + // This is the last key. + let last_row = last.slice(last.num_rows() - 1, 1); + return Some(last_row); + } + None + } +} + #[cfg(test)] mod tests { use api::v1::OpType;
refactor
LastRowReader to use LastRowSelector (#4374)
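The refactor above extracts the "keep only the last row per time series" logic into a push-based `LastRowSelector` with `on_next`/`finish`. The sketch below shows the same pattern in a generic, self-contained form, assuming nothing from mito2: keys and values are plain tuples rather than `Batch`es grouped by primary key, but the control flow (buffer the latest item, emit on key change, flush at the end) is the one the selector encapsulates.

#[derive(Default)]
struct LastPerKey<K, V> {
    // The latest item seen for the current key, pending emission.
    pending: Option<(K, V)>,
}

impl<K: PartialEq, V> LastPerKey<K, V> {
    /// Handles the next item; returns the previous key's last item when the key changes.
    fn on_next(&mut self, key: K, value: V) -> Option<(K, V)> {
        let same_key = matches!(&self.pending, Some((last_key, _)) if *last_key == key);
        if same_key || self.pending.is_none() {
            // Same key (or first item): keep only the latest value.
            self.pending = Some((key, value));
            None
        } else {
            // Key changed: yield the previous key's last value, start buffering the new key.
            self.pending.replace((key, value))
        }
    }

    /// Flushes the final pending item, if any.
    fn finish(&mut self) -> Option<(K, V)> {
        self.pending.take()
    }
}

fn main() {
    let mut selector = LastPerKey::default();
    let mut out = Vec::new();
    for (k, v) in [("a", 1), ("a", 2), ("b", 3), ("b", 4)] {
        out.extend(selector.on_next(k, v));
    }
    out.extend(selector.finish());
    assert_eq!(out, vec![("a", 2), ("b", 4)]);
    println!("{:?}", out);
}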
51a4d660b78586be068ea14d78e780b17c841376
2023-06-12 14:51:49
Eugene Tolbakov
feat(to_unixtime): add timestamp types as arguments (#1632)
false
diff --git a/src/common/function/src/scalars/timestamp/to_unixtime.rs b/src/common/function/src/scalars/timestamp/to_unixtime.rs index 99225251646e..e07adc793dad 100644 --- a/src/common/function/src/scalars/timestamp/to_unixtime.rs +++ b/src/common/function/src/scalars/timestamp/to_unixtime.rs @@ -16,13 +16,16 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; -use common_query::error::{self, Result, UnsupportedInputDataTypeSnafu}; +use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu}; use common_query::prelude::{Signature, Volatility}; use common_time::timestamp::TimeUnit; use common_time::Timestamp; use datatypes::prelude::ConcreteDataType; -use datatypes::types::StringType; -use datatypes::vectors::{Int64Vector, StringVector, Vector, VectorRef}; +use datatypes::types::TimestampType; +use datatypes::vectors::{ + Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector, + TimestampNanosecondVector, TimestampSecondVector, Vector, VectorRef, +}; use snafu::ensure; use crate::scalars::function::{Function, FunctionContext}; @@ -42,18 +45,33 @@ fn convert_to_seconds(arg: &str) -> Option<i64> { } } +fn process_vector(vector: &dyn Vector) -> Vec<Option<i64>> { + (0..vector.len()) + .map(|i| paste::expr!((vector.get(i)).as_timestamp().map(|ts| ts.value()))) + .collect::<Vec<Option<i64>>>() +} + impl Function for ToUnixtimeFunction { fn name(&self) -> &str { NAME } fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> { - Ok(ConcreteDataType::timestamp_second_datatype()) + Ok(ConcreteDataType::int64_datatype()) } fn signature(&self) -> Signature { - Signature::exact( - vec![ConcreteDataType::String(StringType)], + Signature::uniform( + 1, + vec![ + ConcreteDataType::string_datatype(), + ConcreteDataType::int32_datatype(), + ConcreteDataType::int64_datatype(), + ConcreteDataType::timestamp_second_datatype(), + ConcreteDataType::timestamp_millisecond_datatype(), + ConcreteDataType::timestamp_microsecond_datatype(), + ConcreteDataType::timestamp_nanosecond_datatype(), + ], Volatility::Immutable, ) } @@ -61,7 +79,7 @@ impl Function for ToUnixtimeFunction { fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> { ensure!( columns.len() == 1, - error::InvalidFuncArgsSnafu { + InvalidFuncArgsSnafu { err_msg: format!( "The length of the args is not correct, expect exactly one, have: {}", columns.len() @@ -79,6 +97,42 @@ impl Function for ToUnixtimeFunction { .collect::<Vec<_>>(), ))) } + ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => { + let array = columns[0].to_arrow_array(); + Ok(Arc::new(Int64Vector::try_from_arrow_array(&array).unwrap())) + } + ConcreteDataType::Timestamp(ts) => { + let array = columns[0].to_arrow_array(); + let value = match ts { + TimestampType::Second(_) => { + let vector = paste::expr!(TimestampSecondVector::try_from_arrow_array( + array + ) + .unwrap()); + process_vector(&vector) + } + TimestampType::Millisecond(_) => { + let vector = paste::expr!( + TimestampMillisecondVector::try_from_arrow_array(array).unwrap() + ); + process_vector(&vector) + } + TimestampType::Microsecond(_) => { + let vector = paste::expr!( + TimestampMicrosecondVector::try_from_arrow_array(array).unwrap() + ); + process_vector(&vector) + } + TimestampType::Nanosecond(_) => { + let vector = paste::expr!(TimestampNanosecondVector::try_from_arrow_array( + array + ) + .unwrap()); + process_vector(&vector) + } + }; + Ok(Arc::new(Int64Vector::from(value))) + } _ => 
UnsupportedInputDataTypeSnafu { function: NAME, datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(), @@ -97,28 +151,37 @@ impl fmt::Display for ToUnixtimeFunction { #[cfg(test)] mod tests { use common_query::prelude::TypeSignature; - use datatypes::prelude::ConcreteDataType; - use datatypes::types::StringType; + use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder}; + use datatypes::scalars::ScalarVector; + use datatypes::timestamp::TimestampSecond; use datatypes::value::Value; - use datatypes::vectors::StringVector; + use datatypes::vectors::{StringVector, TimestampSecondVector}; use super::{ToUnixtimeFunction, *}; use crate::scalars::Function; #[test] - fn test_to_unixtime() { + fn test_string_to_unixtime() { let f = ToUnixtimeFunction::default(); assert_eq!("to_unixtime", f.name()); assert_eq!( - ConcreteDataType::timestamp_second_datatype(), + ConcreteDataType::int64_datatype(), f.return_type(&[]).unwrap() ); assert!(matches!(f.signature(), - Signature { - type_signature: TypeSignature::Exact(valid_types), - volatility: Volatility::Immutable - } if valid_types == vec![ConcreteDataType::String(StringType)] + Signature { + type_signature: TypeSignature::Uniform(1, valid_types), + volatility: Volatility::Immutable + } if valid_types == vec![ + ConcreteDataType::string_datatype(), + ConcreteDataType::int32_datatype(), + ConcreteDataType::int64_datatype(), + ConcreteDataType::timestamp_second_datatype(), + ConcreteDataType::timestamp_millisecond_datatype(), + ConcreteDataType::timestamp_microsecond_datatype(), + ConcreteDataType::timestamp_nanosecond_datatype(), + ] )); let times = vec![ @@ -145,4 +208,106 @@ mod tests { } } } + + #[test] + fn test_int_to_unixtime() { + let f = ToUnixtimeFunction::default(); + assert_eq!("to_unixtime", f.name()); + assert_eq!( + ConcreteDataType::int64_datatype(), + f.return_type(&[]).unwrap() + ); + + assert!(matches!(f.signature(), + Signature { + type_signature: TypeSignature::Uniform(1, valid_types), + volatility: Volatility::Immutable + } if valid_types == vec![ + ConcreteDataType::string_datatype(), + ConcreteDataType::int32_datatype(), + ConcreteDataType::int64_datatype(), + ConcreteDataType::timestamp_second_datatype(), + ConcreteDataType::timestamp_millisecond_datatype(), + ConcreteDataType::timestamp_microsecond_datatype(), + ConcreteDataType::timestamp_nanosecond_datatype(), + ] + )); + + let times = vec![Some(3_i64), None, Some(5_i64), None]; + let results = vec![Some(3), None, Some(5), None]; + let args: Vec<VectorRef> = vec![Arc::new(Int64Vector::from(times.clone()))]; + let vector = f.eval(FunctionContext::default(), &args).unwrap(); + assert_eq!(4, vector.len()); + for (i, _t) in times.iter().enumerate() { + let v = vector.get(i); + if i == 1 || i == 3 { + assert_eq!(Value::Null, v); + continue; + } + match v { + Value::Int64(ts) => { + assert_eq!(ts, (*results.get(i).unwrap()).unwrap()); + } + _ => unreachable!(), + } + } + } + + #[test] + fn test_timestamp_to_unixtime() { + let f = ToUnixtimeFunction::default(); + assert_eq!("to_unixtime", f.name()); + assert_eq!( + ConcreteDataType::int64_datatype(), + f.return_type(&[]).unwrap() + ); + + assert!(matches!(f.signature(), + Signature { + type_signature: TypeSignature::Uniform(1, valid_types), + volatility: Volatility::Immutable + } if valid_types == vec![ + ConcreteDataType::string_datatype(), + ConcreteDataType::int32_datatype(), + ConcreteDataType::int64_datatype(), + ConcreteDataType::timestamp_second_datatype(), + 
ConcreteDataType::timestamp_millisecond_datatype(), + ConcreteDataType::timestamp_microsecond_datatype(), + ConcreteDataType::timestamp_nanosecond_datatype(), + ] + )); + + let times: Vec<Option<TimestampSecond>> = vec![ + Some(TimestampSecond::new(123)), + None, + Some(TimestampSecond::new(42)), + None, + ]; + let results = vec![Some(123), None, Some(42), None]; + let ts_vector: TimestampSecondVector = build_vector_from_slice(&times); + let args: Vec<VectorRef> = vec![Arc::new(ts_vector)]; + let vector = f.eval(FunctionContext::default(), &args).unwrap(); + assert_eq!(4, vector.len()); + for (i, _t) in times.iter().enumerate() { + let v = vector.get(i); + if i == 1 || i == 3 { + assert_eq!(Value::Null, v); + continue; + } + match v { + Value::Int64(ts) => { + assert_eq!(ts, (*results.get(i).unwrap()).unwrap()); + } + _ => unreachable!(), + } + } + } + + fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T { + let mut builder = T::Builder::with_capacity(items.len()); + for item in items { + builder.push(*item); + } + builder.finish() + } } diff --git a/tests/cases/standalone/common/select/dummy.result b/tests/cases/standalone/common/select/dummy.result index 759f7b57c934..0887fdd7f8f3 100644 --- a/tests/cases/standalone/common/select/dummy.result +++ b/tests/cases/standalone/common/select/dummy.result @@ -42,3 +42,64 @@ select TO_UNIXTIME('2023-03-01T06:35:02Z'); | 1677652502 | +-------------------------------------------+ +select TO_UNIXTIME(2); + ++-----------------------+ +| to_unixtime(Int64(2)) | ++-----------------------+ +| 2 | ++-----------------------+ + +create table test_unixtime(a int, b timestamp time index); + +Affected Rows: 0 + +DESC TABLE test_unixtime; + ++-------+----------------------+------+---------+---------------+ +| Field | Type | Null | Default | Semantic Type | ++-------+----------------------+------+---------+---------------+ +| a | Int32 | YES | | FIELD | +| b | TimestampMillisecond | NO | | TIME INDEX | ++-------+----------------------+------+---------+---------------+ + +insert into test_unixtime values(27, 27); + +Affected Rows: 1 + +select * from test_unixtime; + ++----+-------------------------+ +| a | b | ++----+-------------------------+ +| 27 | 1970-01-01T00:00:00.027 | ++----+-------------------------+ + +select a from test_unixtime; + ++----+ +| a | ++----+ +| 27 | ++----+ + +select b from test_unixtime; + ++-------------------------+ +| b | ++-------------------------+ +| 1970-01-01T00:00:00.027 | ++-------------------------+ + +select TO_UNIXTIME(b) from test_unixtime; + ++------------------------------+ +| to_unixtime(test_unixtime.b) | ++------------------------------+ +| 27 | ++------------------------------+ + +DROP TABLE test_unixtime; + +Affected Rows: 1 + diff --git a/tests/cases/standalone/common/select/dummy.sql b/tests/cases/standalone/common/select/dummy.sql index 9c2a3944cf19..5bf16da86433 100644 --- a/tests/cases/standalone/common/select/dummy.sql +++ b/tests/cases/standalone/common/select/dummy.sql @@ -11,3 +11,21 @@ select "A"; select * where "a" = "A"; select TO_UNIXTIME('2023-03-01T06:35:02Z'); + +select TO_UNIXTIME(2); + +create table test_unixtime(a int, b timestamp time index); + +DESC TABLE test_unixtime; + +insert into test_unixtime values(27, 27); + +select * from test_unixtime; + +select a from test_unixtime; + +select b from test_unixtime; + +select TO_UNIXTIME(b) from test_unixtime; + +DROP TABLE test_unixtime;
feat
add timestamp types as arguments (#1632)
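The diff above widens `to_unixtime` from a string-only signature to a uniform signature over string, integer and timestamp inputs, and branches on the concrete column type inside `eval`. The sketch below illustrates only that dispatch shape with a hypothetical `Column` enum and a simplified parser; it does not reproduce the real `ConcreteDataType`/vector machinery or the function's exact conversion rules.

enum Column {
    Strings(Vec<Option<String>>),
    Int64s(Vec<Option<i64>>),
    TimestampSeconds(Vec<Option<i64>>),
}

fn parse_to_seconds(s: &str) -> Option<i64> {
    // Stand-in for the timestamp-string parsing the real function performs.
    s.parse::<i64>().ok()
}

fn to_unixtime(column: &Column) -> Vec<Option<i64>> {
    match column {
        // String columns need per-row parsing; unparseable rows become NULL.
        Column::Strings(values) => values
            .iter()
            .map(|v| v.as_deref().and_then(parse_to_seconds))
            .collect(),
        // Integer and timestamp columns already carry an i64 value per row.
        Column::Int64s(values) | Column::TimestampSeconds(values) => values.clone(),
    }
}

fn main() {
    let ints = Column::Int64s(vec![Some(3), None, Some(5)]);
    assert_eq!(to_unixtime(&ints), vec![Some(3), None, Some(5)]);
    let ts = Column::TimestampSeconds(vec![Some(1_677_652_502)]);
    assert_eq!(to_unixtime(&ts), vec![Some(1_677_652_502)]);
    let strs = Column::Strings(vec![Some("1677652502".to_string()), None]);
    assert_eq!(to_unixtime(&strs), vec![Some(1_677_652_502), None]);
    println!("ok");
}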
d2d62e0c6f42e75f500a85847494faa97032b6e3
2024-09-07 09:58:11
Ruihang Xia
fix: unconditional statistics (#4694)
false
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs index dcf5b4395c42..ec45c9b93454 100644 --- a/src/mito2/src/read/scan_region.rs +++ b/src/mito2/src/read/scan_region.rs @@ -709,6 +709,10 @@ impl ScanInput { rows_in_files + rows_in_memtables } + pub(crate) fn predicate(&self) -> Option<Predicate> { + self.predicate.clone() + } + /// Retrieves [`PartitionRange`] from memtable and files pub(crate) fn partition_ranges(&self) -> Vec<PartitionRange> { let mut id = 0; diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs index ec5fcf53d34a..ca232df83481 100644 --- a/src/mito2/src/read/seq_scan.rs +++ b/src/mito2/src/read/seq_scan.rs @@ -515,6 +515,11 @@ impl RegionScanner for SeqScan { self.properties.partitions = ranges; Ok(()) } + + fn has_predicate(&self) -> bool { + let predicate = self.stream_ctx.input.predicate(); + predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false) + } } impl DisplayAs for SeqScan { diff --git a/src/mito2/src/read/unordered_scan.rs b/src/mito2/src/read/unordered_scan.rs index ec43654e09c2..5dfcc519d656 100644 --- a/src/mito2/src/read/unordered_scan.rs +++ b/src/mito2/src/read/unordered_scan.rs @@ -228,6 +228,11 @@ impl RegionScanner for UnorderedScan { Ok(stream) } + + fn has_predicate(&self) -> bool { + let predicate = self.stream_ctx.input.predicate(); + predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false) + } } impl DisplayAs for UnorderedScan { diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs index cf37fe82f9e6..84555a595bd0 100644 --- a/src/store-api/src/region_engine.rs +++ b/src/store-api/src/region_engine.rs @@ -233,6 +233,9 @@ pub trait RegionScanner: Debug + DisplayAs + Send { /// # Panics /// Panics if the `partition` is out of bound. fn scan_partition(&self, partition: usize) -> Result<SendableRecordBatchStream, BoxedError>; + + /// Check if there is any predicate that may be executed in this scanner. 
+ fn has_predicate(&self) -> bool; } pub type RegionScannerRef = Box<dyn RegionScanner>; @@ -367,6 +370,10 @@ impl RegionScanner for SinglePartitionScanner { )) }) } + + fn has_predicate(&self) -> bool { + false + } } impl DisplayAs for SinglePartitionScanner { diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs index 19283058c6e1..e67c6dc032fb 100644 --- a/src/table/src/table/scan.rs +++ b/src/table/src/table/scan.rs @@ -180,7 +180,7 @@ impl ExecutionPlan for RegionScanExec { } fn statistics(&self) -> DfResult<Statistics> { - let statistics = if self.append_mode { + let statistics = if self.append_mode && !self.scanner.lock().unwrap().has_predicate() { let column_statistics = self .arrow_schema .fields diff --git a/tests/cases/standalone/common/aggregate/count.result b/tests/cases/standalone/common/aggregate/count.result index 4523118d18ac..f93189d985cd 100644 --- a/tests/cases/standalone/common/aggregate/count.result +++ b/tests/cases/standalone/common/aggregate/count.result @@ -54,3 +54,50 @@ drop table test; Affected Rows: 0 +-- Append table +create table count_where_bug ( + tag String, + ts TimestampMillisecond time index, + num Int64, + primary key (tag), +) engine=mito with('append_mode'='true'); + +Affected Rows: 0 + +insert into count_where_bug (tag, ts, num) +values ('a', '2024-09-06T06:00:01Z', 1), + ('a', '2024-09-06T06:00:02Z', 2), + ('a', '2024-09-06T06:00:03Z', 3), + ('b', '2024-09-06T06:00:04Z', 4), + ('b', '2024-09-06T06:00:05Z', 5); + +Affected Rows: 5 + +select count(1) from count_where_bug where tag = 'b'; + ++-----------------+ +| COUNT(Int64(1)) | ++-----------------+ +| 2 | ++-----------------+ + +select count(1) from count_where_bug where ts > '2024-09-06T06:00:04Z'; + ++-----------------+ +| COUNT(Int64(1)) | ++-----------------+ +| 1 | ++-----------------+ + +select count(1) from count_where_bug where num != 3; + ++-----------------+ +| COUNT(Int64(1)) | ++-----------------+ +| 4 | ++-----------------+ + +drop table count_where_bug; + +Affected Rows: 0 + diff --git a/tests/cases/standalone/common/aggregate/count.sql b/tests/cases/standalone/common/aggregate/count.sql index 80100c96aecf..22745b723cfd 100644 --- a/tests/cases/standalone/common/aggregate/count.sql +++ b/tests/cases/standalone/common/aggregate/count.sql @@ -17,3 +17,27 @@ select count(*) from (select * from test cross join "HelloWorld"); drop table "HelloWorld"; drop table test; + +-- Append table + +create table count_where_bug ( + tag String, + ts TimestampMillisecond time index, + num Int64, + primary key (tag), +) engine=mito with('append_mode'='true'); + +insert into count_where_bug (tag, ts, num) +values ('a', '2024-09-06T06:00:01Z', 1), + ('a', '2024-09-06T06:00:02Z', 2), + ('a', '2024-09-06T06:00:03Z', 3), + ('b', '2024-09-06T06:00:04Z', 4), + ('b', '2024-09-06T06:00:05Z', 5); + +select count(1) from count_where_bug where tag = 'b'; + +select count(1) from count_where_bug where ts > '2024-09-06T06:00:04Z'; + +select count(1) from count_where_bug where num != 3; + +drop table count_where_bug;
fix
unconditional statistics (#4694)
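The fix above stops the scan node from reporting exact statistics for append-mode tables once the scanner carries a predicate, so a `COUNT(*)` with a `WHERE` clause can no longer be answered from statistics alone. A minimal sketch of that guard follows, with illustrative types rather than the real DataFusion `Statistics`/`RegionScanner` API.

struct Scanner {
    total_rows: usize,
    // Pushed-down filter expressions, e.g. ["tag = 'b'"].
    predicates: Vec<String>,
}

impl Scanner {
    fn has_predicate(&self) -> bool {
        !self.predicates.is_empty()
    }

    /// Reports an exact row count only when it is safe: append-only data and no filter.
    fn exact_row_count(&self, append_mode: bool) -> Option<usize> {
        if append_mode && !self.has_predicate() {
            Some(self.total_rows)
        } else {
            // Unknown: force the engine to actually scan and filter.
            None
        }
    }
}

fn main() {
    let filtered = Scanner { total_rows: 5, predicates: vec!["tag = 'b'".to_string()] };
    assert_eq!(filtered.exact_row_count(true), None);
    let full = Scanner { total_rows: 5, predicates: vec![] };
    assert_eq!(full.exact_row_count(true), Some(5));
    println!("ok");
}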
4b4c6dbb66497e48e9573509dd1d9ab76e57097e
2024-12-13 13:04:24
Yohan Wal
refactor: cache inverted index with fixed-size page (#5114)
false
diff --git a/Cargo.lock b/Cargo.lock index 534b8c465ae6..b60615c8e54c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6643,6 +6643,7 @@ dependencies = [ "async-channel 1.9.0", "async-stream", "async-trait", + "bytemuck", "bytes", "common-base", "common-config", diff --git a/config/config.md b/config/config.md index 6a500a5b4a34..d3353930b163 100644 --- a/config/config.md +++ b/config/config.md @@ -150,6 +150,7 @@ | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. | | `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. | +| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | @@ -475,6 +476,9 @@ | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | +| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. | +| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. | +| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | diff --git a/config/datanode.example.toml b/config/datanode.example.toml index 0ba80a9f7d92..90a4d69b2e89 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -543,6 +543,15 @@ mem_threshold_on_create = "auto" ## Deprecated, use `region_engine.mito.index.aux_path` instead. intermediate_path = "" +## Cache size for inverted index metadata. +metadata_cache_size = "64MiB" + +## Cache size for inverted index content. +content_cache_size = "128MiB" + +## Page size for inverted index content cache. +content_cache_page_size = "8MiB" + ## The options for full-text index in Mito engine. 
[region_engine.mito.fulltext_index] diff --git a/config/standalone.example.toml b/config/standalone.example.toml index 8eae532d6166..b73246d37f0a 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -588,6 +588,9 @@ metadata_cache_size = "64MiB" ## Cache size for inverted index content. content_cache_size = "128MiB" +## Page size for inverted index content cache. +content_cache_page_size = "8MiB" + ## The options for full-text index in Mito engine. [region_engine.mito.fulltext_index] diff --git a/src/common/base/src/range_read.rs b/src/common/base/src/range_read.rs index 61f28cb629fd..53c26eeebdee 100644 --- a/src/common/base/src/range_read.rs +++ b/src/common/base/src/range_read.rs @@ -205,9 +205,7 @@ impl RangeReader for Vec<u8> { }) } - async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> { - range.end = range.end.min(self.len() as u64); - + async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> { let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]); Ok(bytes) } diff --git a/src/index/src/inverted_index/format/reader.rs b/src/index/src/inverted_index/format/reader.rs index a6fb0cecbfcd..904681d5f40a 100644 --- a/src/index/src/inverted_index/format/reader.rs +++ b/src/index/src/inverted_index/format/reader.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::Range; use std::sync::Arc; use async_trait::async_trait; @@ -30,23 +31,23 @@ mod footer; #[mockall::automock] #[async_trait] pub trait InvertedIndexReader: Send { - /// Reads all data to dest. - async fn read_all(&mut self, dest: &mut Vec<u8>) -> Result<usize>; - /// Seeks to given offset and reads data with exact size as provided. - async fn seek_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>>; + async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>>; + + /// Reads the bytes in the given ranges. + async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Vec<u8>>>; /// Retrieves metadata of all inverted indices stored within the blob. async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>>; /// Retrieves the finite state transducer (FST) map from the given offset and size. async fn fst(&mut self, offset: u64, size: u32) -> Result<FstMap> { - let fst_data = self.seek_read(offset, size).await?; + let fst_data = self.range_read(offset, size).await?; FstMap::new(fst_data).context(DecodeFstSnafu) } /// Retrieves the bitmap from the given offset and size. async fn bitmap(&mut self, offset: u64, size: u32) -> Result<BitVec> { - self.seek_read(offset, size).await.map(BitVec::from_vec) + self.range_read(offset, size).await.map(BitVec::from_vec) } } diff --git a/src/index/src/inverted_index/format/reader/blob.rs b/src/index/src/inverted_index/format/reader/blob.rs index de34cd36f849..371655d535f3 100644 --- a/src/index/src/inverted_index/format/reader/blob.rs +++ b/src/index/src/inverted_index/format/reader/blob.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::ops::Range; use std::sync::Arc; use async_trait::async_trait; @@ -50,16 +51,7 @@ impl<R> InvertedIndexBlobReader<R> { #[async_trait] impl<R: RangeReader> InvertedIndexReader for InvertedIndexBlobReader<R> { - async fn read_all(&mut self, dest: &mut Vec<u8>) -> Result<usize> { - let metadata = self.source.metadata().await.context(CommonIoSnafu)?; - self.source - .read_into(0..metadata.content_length, dest) - .await - .context(CommonIoSnafu)?; - Ok(metadata.content_length as usize) - } - - async fn seek_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> { + async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> { let buf = self .source .read(offset..offset + size as u64) @@ -68,6 +60,11 @@ impl<R: RangeReader> InvertedIndexReader for InvertedIndexBlobReader<R> { Ok(buf.into()) } + async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Vec<u8>>> { + let bufs = self.source.read_vec(ranges).await.context(CommonIoSnafu)?; + Ok(bufs.into_iter().map(|buf| buf.into()).collect()) + } + async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>> { let metadata = self.source.metadata().await.context(CommonIoSnafu)?; let blob_size = metadata.content_length; diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml index eedf6ae636d8..eecb79440a2e 100644 --- a/src/mito2/Cargo.toml +++ b/src/mito2/Cargo.toml @@ -17,6 +17,7 @@ aquamarine.workspace = true async-channel = "1.9" async-stream.workspace = true async-trait = "0.1" +bytemuck.workspace = true bytes.workspace = true common-base.workspace = true common-config.workspace = true diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs index 7018b039d62e..03cf9136245a 100644 --- a/src/mito2/src/cache.rs +++ b/src/mito2/src/cache.rs @@ -244,6 +244,7 @@ pub struct CacheManagerBuilder { page_cache_size: u64, index_metadata_size: u64, index_content_size: u64, + index_content_page_size: u64, puffin_metadata_size: u64, write_cache: Option<WriteCacheRef>, selector_result_cache_size: u64, @@ -286,6 +287,12 @@ impl CacheManagerBuilder { self } + /// Sets page size for index content. + pub fn index_content_page_size(mut self, bytes: u64) -> Self { + self.index_content_page_size = bytes; + self + } + /// Sets cache size for puffin metadata. pub fn puffin_metadata_size(mut self, bytes: u64) -> Self { self.puffin_metadata_size = bytes; @@ -352,8 +359,11 @@ impl CacheManagerBuilder { }) .build() }); - let inverted_index_cache = - InvertedIndexCache::new(self.index_metadata_size, self.index_content_size); + let inverted_index_cache = InvertedIndexCache::new( + self.index_metadata_size, + self.index_content_size, + self.index_content_page_size, + ); let puffin_metadata_cache = PuffinMetadataCache::new(self.puffin_metadata_size, &CACHE_BYTES); let selector_result_cache = (self.selector_result_cache_size != 0).then(|| { diff --git a/src/mito2/src/cache/index.rs b/src/mito2/src/cache/index.rs index 4e6e4deee260..e25fb22dcbf5 100644 --- a/src/mito2/src/cache/index.rs +++ b/src/mito2/src/cache/index.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::ops::Range; use std::sync::Arc; use api::v1::index::InvertedIndexMetas; @@ -34,14 +35,16 @@ const INDEX_CONTENT_TYPE: &str = "index_content"; /// Inverted index blob reader with cache. 
pub struct CachedInvertedIndexBlobReader<R> { file_id: FileId, + file_size: u64, inner: R, cache: InvertedIndexCacheRef, } impl<R> CachedInvertedIndexBlobReader<R> { - pub fn new(file_id: FileId, inner: R, cache: InvertedIndexCacheRef) -> Self { + pub fn new(file_id: FileId, file_size: u64, inner: R, cache: InvertedIndexCacheRef) -> Self { Self { file_id, + file_size, inner, cache, } @@ -59,43 +62,77 @@ where offset: u64, size: u32, ) -> index::inverted_index::error::Result<Vec<u8>> { - let range = offset as usize..(offset + size as u64) as usize; - if let Some(cached) = self.cache.get_index(IndexKey { - file_id: self.file_id, - }) { - CACHE_HIT.with_label_values(&[INDEX_CONTENT_TYPE]).inc(); - Ok(cached[range].to_vec()) - } else { - let mut all_data = Vec::with_capacity(1024 * 1024); - self.inner.read_all(&mut all_data).await?; - let result = all_data[range].to_vec(); - self.cache.put_index( - IndexKey { - file_id: self.file_id, - }, - Arc::new(all_data), - ); - CACHE_MISS.with_label_values(&[INDEX_CONTENT_TYPE]).inc(); - Ok(result) + let keys = + IndexDataPageKey::generate_page_keys(self.file_id, offset, size, self.cache.page_size); + // Size is 0, return empty data. + if keys.is_empty() { + return Ok(Vec::new()); + } + // TODO: Can be replaced by an uncontinuous structure like opendal::Buffer. + let mut data = Vec::with_capacity(keys.len()); + data.resize(keys.len(), Arc::new(Vec::new())); + let mut cache_miss_range = vec![]; + let mut cache_miss_idx = vec![]; + let last_index = keys.len() - 1; + // TODO: Avoid copy as much as possible. + for (i, index) in keys.clone().into_iter().enumerate() { + match self.cache.get_index(&index) { + Some(page) => { + CACHE_HIT.with_label_values(&[INDEX_CONTENT_TYPE]).inc(); + data[i] = page; + } + None => { + CACHE_MISS.with_label_values(&[INDEX_CONTENT_TYPE]).inc(); + let base_offset = index.page_id * self.cache.page_size; + let pruned_size = if i == last_index { + prune_size(&keys, self.file_size, self.cache.page_size) + } else { + self.cache.page_size + }; + cache_miss_range.push(base_offset..base_offset + pruned_size); + cache_miss_idx.push(i); + } + } } + if !cache_miss_range.is_empty() { + let pages = self.inner.read_vec(&cache_miss_range).await?; + for (i, page) in cache_miss_idx.into_iter().zip(pages.into_iter()) { + let page = Arc::new(page); + let key = keys[i].clone(); + data[i] = page.clone(); + self.cache.put_index(key, page.clone()); + } + } + let mut result = Vec::with_capacity(size as usize); + data.iter().enumerate().for_each(|(i, page)| { + let range = if i == 0 { + IndexDataPageKey::calculate_first_page_range(offset, size, self.cache.page_size) + } else if i == last_index { + IndexDataPageKey::calculate_last_page_range(offset, size, self.cache.page_size) + } else { + 0..self.cache.page_size as usize + }; + result.extend_from_slice(&page[range]); + }); + Ok(result) } } #[async_trait] impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobReader<R> { - async fn read_all( - &mut self, - dest: &mut Vec<u8>, - ) -> index::inverted_index::error::Result<usize> { - self.inner.read_all(dest).await - } - - async fn seek_read( + async fn range_read( &mut self, offset: u64, size: u32, ) -> index::inverted_index::error::Result<Vec<u8>> { - self.inner.seek_read(offset, size).await + self.inner.range_read(offset, size).await + } + + async fn read_vec( + &mut self, + ranges: &[Range<u64>], + ) -> index::inverted_index::error::Result<Vec<Vec<u8>>> { + self.inner.read_vec(ranges).await } async fn metadata(&mut self) -> 
index::inverted_index::error::Result<Arc<InvertedIndexMetas>> { @@ -130,22 +167,81 @@ impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobRead } #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct IndexKey { +pub struct IndexMetadataKey { + file_id: FileId, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IndexDataPageKey { file_id: FileId, + page_id: u64, +} + +impl IndexDataPageKey { + /// Converts an offset to a page ID based on the page size. + fn calculate_page_id(offset: u64, page_size: u64) -> u64 { + offset / page_size + } + + /// Calculates the total number of pages that a given size spans, starting from a specific offset. + fn calculate_page_count(offset: u64, size: u32, page_size: u64) -> u32 { + let start_page = Self::calculate_page_id(offset, page_size); + let end_page = Self::calculate_page_id(offset + (size as u64) - 1, page_size); + (end_page + 1 - start_page) as u32 + } + + /// Computes the byte range in the first page based on the offset and size. + /// For example, if offset is 1000 and size is 5000 with PAGE_SIZE of 4096, the first page range is 1000..4096. + fn calculate_first_page_range(offset: u64, size: u32, page_size: u64) -> Range<usize> { + let start = (offset % page_size) as usize; + let end = if size > page_size as u32 - start as u32 { + page_size as usize + } else { + start + size as usize + }; + start..end + } + + /// Computes the byte range in the last page based on the offset and size. + /// For example, if offset is 1000 and size is 5000 with PAGE_SIZE of 4096, the last page range is 0..1904. + fn calculate_last_page_range(offset: u64, size: u32, page_size: u64) -> Range<usize> { + let offset = offset as usize; + let size = size as usize; + let page_size = page_size as usize; + if (offset + size) % page_size == 0 { + 0..page_size + } else { + 0..((offset + size) % page_size) + } + } + + /// Generates a vector of IndexKey instances for the pages that a given offset and size span. + fn generate_page_keys(file_id: FileId, offset: u64, size: u32, page_size: u64) -> Vec<Self> { + let start_page = Self::calculate_page_id(offset, page_size); + let total_pages = Self::calculate_page_count(offset, size, page_size); + (0..total_pages) + .map(|i| Self { + file_id, + page_id: start_page + i as u64, + }) + .collect() + } } pub type InvertedIndexCacheRef = Arc<InvertedIndexCache>; pub struct InvertedIndexCache { /// Cache for inverted index metadata - index_metadata: moka::sync::Cache<IndexKey, Arc<InvertedIndexMetas>>, + index_metadata: moka::sync::Cache<IndexMetadataKey, Arc<InvertedIndexMetas>>, /// Cache for inverted index content. - index: moka::sync::Cache<IndexKey, Arc<Vec<u8>>>, + index: moka::sync::Cache<IndexDataPageKey, Arc<Vec<u8>>>, + // Page size for index content. + page_size: u64, } impl InvertedIndexCache { /// Creates `InvertedIndexCache` with provided `index_metadata_cap` and `index_content_cap`. 
- pub fn new(index_metadata_cap: u64, index_content_cap: u64) -> Self { + pub fn new(index_metadata_cap: u64, index_content_cap: u64, page_size: u64) -> Self { common_telemetry::debug!("Building InvertedIndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}"); let index_metadata = moka::sync::CacheBuilder::new(index_metadata_cap) .name("inverted_index_metadata") @@ -170,29 +266,29 @@ impl InvertedIndexCache { Self { index_metadata, index: index_cache, + page_size, } } } impl InvertedIndexCache { pub fn get_index_metadata(&self, file_id: FileId) -> Option<Arc<InvertedIndexMetas>> { - self.index_metadata.get(&IndexKey { file_id }) + self.index_metadata.get(&IndexMetadataKey { file_id }) } pub fn put_index_metadata(&self, file_id: FileId, metadata: Arc<InvertedIndexMetas>) { - let key = IndexKey { file_id }; + let key = IndexMetadataKey { file_id }; CACHE_BYTES .with_label_values(&[INDEX_METADATA_TYPE]) .add(index_metadata_weight(&key, &metadata).into()); self.index_metadata.insert(key, metadata) } - // todo(hl): align index file content to pages with size like 4096 bytes. - pub fn get_index(&self, key: IndexKey) -> Option<Arc<Vec<u8>>> { - self.index.get(&key) + pub fn get_index(&self, key: &IndexDataPageKey) -> Option<Arc<Vec<u8>>> { + self.index.get(key) } - pub fn put_index(&self, key: IndexKey, value: Arc<Vec<u8>>) { + pub fn put_index(&self, key: IndexDataPageKey, value: Arc<Vec<u8>>) { CACHE_BYTES .with_label_values(&[INDEX_CONTENT_TYPE]) .add(index_content_weight(&key, &value).into()); @@ -201,11 +297,234 @@ impl InvertedIndexCache { } /// Calculates weight for index metadata. -fn index_metadata_weight(k: &IndexKey, v: &Arc<InvertedIndexMetas>) -> u32 { +fn index_metadata_weight(k: &IndexMetadataKey, v: &Arc<InvertedIndexMetas>) -> u32 { (k.file_id.as_bytes().len() + v.encoded_len()) as u32 } /// Calculates weight for index content. -fn index_content_weight(k: &IndexKey, v: &Arc<Vec<u8>>) -> u32 { +fn index_content_weight(k: &IndexDataPageKey, v: &Arc<Vec<u8>>) -> u32 { (k.file_id.as_bytes().len() + v.len()) as u32 } + +/// Prunes the size of the last page based on the indexes. +/// We have following cases: +/// 1. The rest file size is less than the page size, read to the end of the file. +/// 2. Otherwise, read the page size. 
+fn prune_size(indexes: &[IndexDataPageKey], file_size: u64, page_size: u64) -> u64 { + let last_page_start = indexes.last().map(|i| i.page_id * page_size).unwrap_or(0); + page_size.min(file_size - last_page_start) +} + +#[cfg(test)] +mod test { + use std::num::NonZeroUsize; + + use common_base::BitVec; + use futures::stream; + use index::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader}; + use index::inverted_index::format::writer::{InvertedIndexBlobWriter, InvertedIndexWriter}; + use index::inverted_index::Bytes; + use prometheus::register_int_counter_vec; + use rand::{Rng, RngCore}; + + use super::*; + use crate::sst::index::store::InstrumentedStore; + use crate::test_util::TestEnv; + + // Fuzz test for index data page key + #[test] + fn fuzz_index_calculation() { + // randomly generate a large u8 array + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; 1024 * 1024]; + rng.fill_bytes(&mut data); + let file_id = FileId::random(); + + for _ in 0..100 { + let offset = rng.gen_range(0..data.len() as u64); + let size = rng.gen_range(0..data.len() as u32 - offset as u32); + let page_size: usize = rng.gen_range(1..1024); + + let indexes = + IndexDataPageKey::generate_page_keys(file_id, offset, size, page_size as u64); + let page_num = indexes.len(); + let mut read = Vec::with_capacity(size as usize); + let last_index = indexes.len() - 1; + for (i, key) in indexes.into_iter().enumerate() { + let start = key.page_id as usize * page_size; + let page = if start + page_size < data.len() { + &data[start..start + page_size] + } else { + &data[start..] + }; + let range = if i == 0 { + // first page range + IndexDataPageKey::calculate_first_page_range(offset, size, page_size as u64) + } else if i == last_index { + // last page range. when the first page is the last page, the range is not used. 
+ IndexDataPageKey::calculate_last_page_range(offset, size, page_size as u64) + } else { + 0..page_size + }; + read.extend_from_slice(&page[range]); + } + let expected_range = offset as usize..(offset + size as u64 as u64) as usize; + if read != data.get(expected_range).unwrap() { + panic!( + "fuzz_read_index failed, offset: {}, size: {}, page_size: {}\nread len: {}, expected len: {}\nfirst page range: {:?}, last page range: {:?}, page num: {}", + offset, size, page_size, read.len(), size as usize, + IndexDataPageKey::calculate_first_page_range(offset, size, page_size as u64), + IndexDataPageKey::calculate_last_page_range(offset, size, page_size as u64), page_num + ); + } + } + } + + fn unpack(fst_value: u64) -> [u32; 2] { + bytemuck::cast::<u64, [u32; 2]>(fst_value) + } + + async fn create_inverted_index_blob() -> Vec<u8> { + let mut blob = Vec::new(); + let mut writer = InvertedIndexBlobWriter::new(&mut blob); + writer + .add_index( + "tag0".to_string(), + BitVec::from_slice(&[0b0000_0001, 0b0000_0000]), + Box::new(stream::iter(vec![ + Ok((Bytes::from("a"), BitVec::from_slice(&[0b0000_0001]))), + Ok((Bytes::from("b"), BitVec::from_slice(&[0b0010_0000]))), + Ok((Bytes::from("c"), BitVec::from_slice(&[0b0000_0001]))), + ])), + ) + .await + .unwrap(); + writer + .add_index( + "tag1".to_string(), + BitVec::from_slice(&[0b0000_0001, 0b0000_0000]), + Box::new(stream::iter(vec![ + Ok((Bytes::from("x"), BitVec::from_slice(&[0b0000_0001]))), + Ok((Bytes::from("y"), BitVec::from_slice(&[0b0010_0000]))), + Ok((Bytes::from("z"), BitVec::from_slice(&[0b0000_0001]))), + ])), + ) + .await + .unwrap(); + writer + .finish(8, NonZeroUsize::new(1).unwrap()) + .await + .unwrap(); + + blob + } + + #[tokio::test] + async fn test_inverted_index_cache() { + let blob = create_inverted_index_blob().await; + + // Init a test range reader in local fs. 
+ let mut env = TestEnv::new(); + let file_size = blob.len() as u64; + let store = env.init_object_store_manager(); + let temp_path = "data"; + store.write(temp_path, blob).await.unwrap(); + let store = InstrumentedStore::new(store); + let metric = + register_int_counter_vec!("test_bytes", "a counter for test", &["test"]).unwrap(); + let counter = metric.with_label_values(&["test"]); + let range_reader = store + .range_reader("data", &counter, &counter) + .await + .unwrap(); + + let reader = InvertedIndexBlobReader::new(range_reader); + let mut cached_reader = CachedInvertedIndexBlobReader::new( + FileId::random(), + file_size, + reader, + Arc::new(InvertedIndexCache::new(8192, 8192, 50)), + ); + let metadata = cached_reader.metadata().await.unwrap(); + assert_eq!(metadata.total_row_count, 8); + assert_eq!(metadata.segment_row_count, 1); + assert_eq!(metadata.metas.len(), 2); + // tag0 + let tag0 = metadata.metas.get("tag0").unwrap(); + let stats0 = tag0.stats.as_ref().unwrap(); + assert_eq!(stats0.distinct_count, 3); + assert_eq!(stats0.null_count, 1); + assert_eq!(stats0.min_value, Bytes::from("a")); + assert_eq!(stats0.max_value, Bytes::from("c")); + let fst0 = cached_reader + .fst( + tag0.base_offset + tag0.relative_fst_offset as u64, + tag0.fst_size, + ) + .await + .unwrap(); + assert_eq!(fst0.len(), 3); + let [offset, size] = unpack(fst0.get(b"a").unwrap()); + let bitmap = cached_reader + .bitmap(tag0.base_offset + offset as u64, size) + .await + .unwrap(); + assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001])); + let [offset, size] = unpack(fst0.get(b"b").unwrap()); + let bitmap = cached_reader + .bitmap(tag0.base_offset + offset as u64, size) + .await + .unwrap(); + assert_eq!(bitmap, BitVec::from_slice(&[0b0010_0000])); + let [offset, size] = unpack(fst0.get(b"c").unwrap()); + let bitmap = cached_reader + .bitmap(tag0.base_offset + offset as u64, size) + .await + .unwrap(); + assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001])); + + // tag1 + let tag1 = metadata.metas.get("tag1").unwrap(); + let stats1 = tag1.stats.as_ref().unwrap(); + assert_eq!(stats1.distinct_count, 3); + assert_eq!(stats1.null_count, 1); + assert_eq!(stats1.min_value, Bytes::from("x")); + assert_eq!(stats1.max_value, Bytes::from("z")); + let fst1 = cached_reader + .fst( + tag1.base_offset + tag1.relative_fst_offset as u64, + tag1.fst_size, + ) + .await + .unwrap(); + assert_eq!(fst1.len(), 3); + let [offset, size] = unpack(fst1.get(b"x").unwrap()); + let bitmap = cached_reader + .bitmap(tag1.base_offset + offset as u64, size) + .await + .unwrap(); + assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001])); + let [offset, size] = unpack(fst1.get(b"y").unwrap()); + let bitmap = cached_reader + .bitmap(tag1.base_offset + offset as u64, size) + .await + .unwrap(); + assert_eq!(bitmap, BitVec::from_slice(&[0b0010_0000])); + let [offset, size] = unpack(fst1.get(b"z").unwrap()); + let bitmap = cached_reader + .bitmap(tag1.base_offset + offset as u64, size) + .await + .unwrap(); + assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001])); + + // fuzz test + let mut rng = rand::thread_rng(); + for _ in 0..100 { + let offset = rng.gen_range(0..file_size); + let size = rng.gen_range(0..file_size as u32 - offset as u32); + let expected = cached_reader.range_read(offset, size).await.unwrap(); + let read = cached_reader.get_or_load(offset, size).await.unwrap(); + assert_eq!(read, expected); + } + } +} diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs index dda3f4271059..963089c60aed 100644 --- 
a/src/mito2/src/config.rs +++ b/src/mito2/src/config.rs @@ -416,6 +416,8 @@ pub struct InvertedIndexConfig { pub metadata_cache_size: ReadableSize, /// Cache size for inverted index content. Setting it to 0 to disable the cache. pub content_cache_size: ReadableSize, + /// Page size for inverted index content. + pub content_cache_page_size: ReadableSize, } impl InvertedIndexConfig { @@ -441,6 +443,7 @@ impl Default for InvertedIndexConfig { intermediate_path: String::new(), metadata_cache_size: ReadableSize::mb(64), content_cache_size: ReadableSize::mb(128), + content_cache_page_size: ReadableSize::mb(8), }; if let Some(sys_memory) = common_config::utils::get_sys_total_memory() { diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs index d5e47d213657..f6d1dbafeec9 100644 --- a/src/mito2/src/error.rs +++ b/src/mito2/src/error.rs @@ -893,6 +893,14 @@ pub enum Error { #[snafu(implicit)] location: Location, }, + + #[snafu(display("Failed to read file metadata"))] + Metadata { + #[snafu(source)] + error: std::io::Error, + #[snafu(implicit)] + location: Location, + }, } pub type Result<T, E = Error> = std::result::Result<T, E>; @@ -965,7 +973,8 @@ impl ErrorExt for Error { | CreateDir { .. } | ReadDataPart { .. } | CorruptedEntry { .. } - | BuildEntry { .. } => StatusCode::Internal, + | BuildEntry { .. } + | Metadata { .. } => StatusCode::Internal, OpenRegion { source, .. } => source.status_code(), diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs index a4f4ab9e446b..1972f3d7abb6 100644 --- a/src/mito2/src/sst/index.rs +++ b/src/mito2/src/sst/index.rs @@ -18,7 +18,7 @@ pub(crate) mod intermediate; pub(crate) mod inverted_index; pub(crate) mod puffin_manager; mod statistics; -mod store; +pub(crate) mod store; use std::num::NonZeroUsize; diff --git a/src/mito2/src/sst/index/inverted_index/applier.rs b/src/mito2/src/sst/index/inverted_index/applier.rs index d060d4bec17b..0542fd7a59ea 100644 --- a/src/mito2/src/sst/index/inverted_index/applier.rs +++ b/src/mito2/src/sst/index/inverted_index/applier.rs @@ -16,6 +16,7 @@ pub mod builder; use std::sync::Arc; +use common_base::range_read::RangeReader; use common_telemetry::warn; use index::inverted_index::format::reader::InvertedIndexBlobReader; use index::inverted_index::search::index_apply::{ @@ -29,7 +30,9 @@ use store_api::storage::RegionId; use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey}; use crate::cache::index::{CachedInvertedIndexBlobReader, InvertedIndexCacheRef}; -use crate::error::{ApplyInvertedIndexSnafu, PuffinBuildReaderSnafu, PuffinReadBlobSnafu, Result}; +use crate::error::{ + ApplyInvertedIndexSnafu, MetadataSnafu, PuffinBuildReaderSnafu, PuffinReadBlobSnafu, Result, +}; use crate::metrics::{INDEX_APPLY_ELAPSED, INDEX_APPLY_MEMORY_USAGE}; use crate::sst::file::FileId; use crate::sst::index::inverted_index::INDEX_BLOB_TYPE; @@ -123,7 +126,7 @@ impl InvertedIndexApplier { index_not_found_strategy: IndexNotFoundStrategy::ReturnEmpty, }; - let blob = match self.cached_blob_reader(file_id).await { + let mut blob = match self.cached_blob_reader(file_id).await { Ok(Some(puffin_reader)) => puffin_reader, other => { if let Err(err) = other { @@ -134,8 +137,14 @@ impl InvertedIndexApplier { }; if let Some(index_cache) = &self.inverted_index_cache { + let file_size = if let Some(file_size) = file_size_hint { + file_size + } else { + blob.metadata().await.context(MetadataSnafu)?.content_length + }; let mut index_reader = CachedInvertedIndexBlobReader::new( file_id, + file_size, 
InvertedIndexBlobReader::new(blob), index_cache.clone(), ); diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs index 43cf54fa2811..15cba55c4437 100644 --- a/src/mito2/src/sst/index/inverted_index/creator.rs +++ b/src/mito2/src/sst/index/inverted_index/creator.rs @@ -448,7 +448,7 @@ mod tests { move |expr| { let _d = &d; - let cache = Arc::new(InvertedIndexCache::new(10, 10)); + let cache = Arc::new(InvertedIndexCache::new(10, 10, 100)); let puffin_metadata_cache = Arc::new(PuffinMetadataCache::new(10, &CACHE_BYTES)); let applier = InvertedIndexApplierBuilder::new( region_dir.clone(), diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs index dec175e76ff6..314e886ba9ca 100644 --- a/src/mito2/src/test_util.rs +++ b/src/mito2/src/test_util.rs @@ -35,8 +35,7 @@ use api::v1::{OpType, Row, Rows, SemanticType}; use common_base::readable_size::ReadableSize; use common_base::Plugins; use common_datasource::compression::CompressionType; -use common_meta::cache::{new_schema_cache, new_table_info_cache, new_table_schema_cache}; -use common_meta::key::schema_name::{SchemaName, SchemaNameValue}; +use common_meta::cache::{new_schema_cache, new_table_schema_cache}; use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef}; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::KvBackendRef; @@ -49,7 +48,7 @@ use datatypes::schema::ColumnSchema; use log_store::kafka::log_store::KafkaLogStore; use log_store::raft_engine::log_store::RaftEngineLogStore; use log_store::test_util::log_store_util; -use moka::future::{Cache, CacheBuilder}; +use moka::future::CacheBuilder; use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef}; use object_store::services::Fs; use object_store::ObjectStore; diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs index f8ab9c3f4edb..233ab9f056b1 100644 --- a/src/mito2/src/worker.rs +++ b/src/mito2/src/worker.rs @@ -170,6 +170,7 @@ impl WorkerGroup { .selector_result_cache_size(config.selector_result_cache_size.as_bytes()) .index_metadata_size(config.inverted_index.metadata_cache_size.as_bytes()) .index_content_size(config.inverted_index.content_cache_size.as_bytes()) + .index_content_page_size(config.inverted_index.content_cache_page_size.as_bytes()) .puffin_metadata_size(config.index.metadata_cache_size.as_bytes()) .write_cache(write_cache) .build(), diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index ab2ec4ea6777..4843b81e9142 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -946,6 +946,7 @@ create_on_flush = "auto" create_on_compaction = "auto" apply_on_query = "auto" mem_threshold_on_create = "auto" +content_cache_page_size = "8MiB" [region_engine.mito.fulltext_index] create_on_flush = "auto"
refactor
cache inverted index with fixed-size page (#5114)
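The fuzz test in the diff above reassembles a requested byte range from fixed-size cache pages: it trims the first and last pages with `IndexDataPageKey::calculate_first_page_range` / `calculate_last_page_range` and keeps middle pages whole. Those helpers are not included in this excerpt, so the following is only a minimal sketch of the page-alignment arithmetic such a cache needs; the function names and exact semantics here are assumptions for illustration, not the crate's real implementation.

```rust
use std::ops::Range;

// Minimal sketch of fixed-size-page range math (assumed semantics, not the
// actual `IndexDataPageKey` helpers referenced in the diff above).

/// Pages touched by the byte range [offset, offset + size).
fn page_span(offset: u64, size: u32, page_size: u64) -> Range<u64> {
    let first = offset / page_size;
    let last = (offset + size as u64).div_ceil(page_size);
    first..last
}

/// Portion of the first touched page that belongs to the request.
fn first_page_range(offset: u64, size: u32, page_size: u64) -> Range<usize> {
    let start = (offset % page_size) as usize;
    let end = (start + size as usize).min(page_size as usize);
    start..end
}

/// Portion of the last touched page that belongs to the request.
fn last_page_range(offset: u64, size: u32, page_size: u64) -> Range<usize> {
    let end = ((offset + size as u64) % page_size) as usize;
    if end == 0 {
        // The request ends exactly on a page boundary: keep the whole page.
        0..page_size as usize
    } else {
        0..end
    }
}

fn main() {
    // A 100-byte read at offset 8000 with 4 KiB pages touches only page 1
    // (bytes 4096..8192) and uses bytes 3904..4004 of that page.
    assert_eq!(page_span(8000, 100, 4096), 1..2);
    assert_eq!(first_page_range(8000, 100, 4096), 3904..4004);
    assert_eq!(last_page_range(8000, 100, 4096), 0..4004);
}
```

Under these assumptions, a read is served by fetching every page the range touches and trimming only the first and last pages, which is the behaviour the fuzz test above cross-checks against a plain range read.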
2e2eacf3b2065a7b9180839d1a4727978becf827
2024-08-12 17:57:11
Weny Xu
feat: add SASL and TLS config for Kafka client (#4536)
false
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml index 8b4e0b8416ad..13bbc41db125 100644 --- a/.github/workflows/develop.yml +++ b/.github/workflows/develop.yml @@ -719,6 +719,7 @@ jobs: GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092 + GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093 UNITTEST_LOG_DIR: "__unittest_logs" - name: Codecov upload uses: codecov/codecov-action@v4 diff --git a/Cargo.lock b/Cargo.lock index 255d07496839..21993b96d03f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2280,6 +2280,9 @@ dependencies = [ "futures-util", "humantime-serde", "rskafka", + "rustls 0.23.10", + "rustls-native-certs", + "rustls-pemfile 2.1.2", "serde", "serde_json", "serde_with", @@ -2445,6 +2448,15 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpp_demangle" version = "0.4.3" @@ -8253,7 +8265,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.4.1", + "heck 0.5.0", "itertools 0.12.1", "log", "multimap", @@ -8274,7 +8286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1" dependencies = [ "bytes", - "heck 0.4.1", + "heck 0.5.0", "itertools 0.13.0", "log", "multimap", @@ -9133,11 +9145,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rsasl" +version = "2.0.2" +source = "git+https://github.com/wenyxu/rsasl.git?rev=06ebb683d5539c3410de4ce9fa37ff9b97e790a4#06ebb683d5539c3410de4ce9fa37ff9b97e790a4" +dependencies = [ + "base64 0.22.1", + "core2", + "digest", + "hmac", + "pbkdf2", + "rand", + "serde_json", + "sha2", + "stringprep", + "thiserror", +] + [[package]] name = "rskafka" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "132ecfa3cd9c3825208524a80881f115337762904ad3f0174e87975b2d79162c" +source = "git+https://github.com/WenyXu/rskafka.git?rev=940c6030012c5b746fad819fb72e3325b26e39de#940c6030012c5b746fad819fb72e3325b26e39de" dependencies = [ "async-trait", "bytes", @@ -9150,11 +9178,14 @@ dependencies = [ "parking_lot 0.12.3", "pin-project-lite", "rand", + "rsasl", + "rustls 0.23.10", "snap", "thiserror", "tokio", + "tokio-rustls 0.26.0", "tracing", - "zstd 0.12.4", + "zstd 0.13.1", ] [[package]] @@ -9423,9 +9454,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.2", @@ -10127,12 +10158,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ "indexmap 2.2.6", "itoa", + 
"memchr", "ryu", "serde", ] @@ -10565,7 +10597,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d1e02fca405f6280643174a50c942219f0bbf4dbf7d480f1dd864d6f211ae5" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.66", @@ -11694,18 +11726,18 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 2f4258f01baa..32a65dac6e01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -151,7 +151,10 @@ reqwest = { version = "0.12", default-features = false, features = [ "stream", "multipart", ] } -rskafka = "0.5" +# SCRAM-SHA-512 requires https://github.com/dequbed/rsasl/pull/48, https://github.com/influxdata/rskafka/pull/247 +rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "940c6030012c5b746fad819fb72e3325b26e39de", features = [ + "transport-tls", +] } rstest = "0.21" rstest_reuse = "0.7" rust_decimal = "1.33" diff --git a/config/config.md b/config/config.md index 0a7e8290f162..dfd2ab889c6d 100644 --- a/config/config.md +++ b/config/config.md @@ -67,6 +67,11 @@ | `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. | +| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start.<br/>**It's only used when the provider is `kafka`**. | +| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. | +| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>**It's only used when the provider is `kafka`**. | +| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. | +| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. | | `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. | | `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. 
| diff --git a/config/datanode.example.toml b/config/datanode.example.toml index 7754542c6238..81cbc4703c4d 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -187,6 +187,24 @@ backoff_base = 2 ## **It's only used when the provider is `kafka`**. backoff_deadline = "5mins" +# The Kafka SASL configuration. +# **It's only used when the provider is `kafka`**. +# Available SASL mechanisms: +# - `PLAIN` +# - `SCRAM-SHA-256` +# - `SCRAM-SHA-512` +# [wal.sasl] +# type = "SCRAM-SHA-512" +# username = "user_kafka" +# password = "secret" + +# The Kafka TLS configuration. +# **It's only used when the provider is `kafka`**. +# [wal.tls] +# server_ca_cert_path = "/path/to/server_cert" +# client_cert_path = "/path/to/client_cert" +# client_key_path = "/path/to/key" + # Example of using S3 as the storage. # [storage] # type = "S3" diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml index f748d8586d0c..b4ed23b2fe02 100644 --- a/config/metasrv.example.toml +++ b/config/metasrv.example.toml @@ -124,6 +124,24 @@ backoff_base = 2 ## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. backoff_deadline = "5mins" +# The Kafka SASL configuration. +# **It's only used when the provider is `kafka`**. +# Available SASL mechanisms: +# - `PLAIN` +# - `SCRAM-SHA-256` +# - `SCRAM-SHA-512` +# [wal.sasl] +# type = "SCRAM-SHA-512" +# username = "user_kafka" +# password = "secret" + +# The Kafka TLS configuration. +# **It's only used when the provider is `kafka`**. +# [wal.tls] +# server_ca_cert_path = "/path/to/server_cert" +# client_cert_path = "/path/to/client_cert" +# client_key_path = "/path/to/key" + ## The logging options. [logging] ## The directory to store the log files. diff --git a/config/standalone.example.toml b/config/standalone.example.toml index 73775b9fc1c4..43f36373706b 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -171,6 +171,28 @@ sync_period = "10s" ## **It's only used when the provider is `kafka`**. broker_endpoints = ["127.0.0.1:9092"] +## Number of topics to be created upon start. +## **It's only used when the provider is `kafka`**. +num_topics = 64 + +## Topic selector type. +## Available selector types: +## - `round_robin` (default) +## **It's only used when the provider is `kafka`**. +selector_type = "round_robin" + +## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. +## **It's only used when the provider is `kafka`**. +topic_name_prefix = "greptimedb_wal_topic" + +## Expected number of replicas of each partition. +## **It's only used when the provider is `kafka`**. +replication_factor = 1 + +## Above which a topic creation operation will be cancelled. +## **It's only used when the provider is `kafka`**. +create_topic_timeout = "30s" + ## The max size of a single producer batch. ## Warning: Kafka has a default limit of 1MB per message in a topic. ## **It's only used when the provider is `kafka`**. @@ -196,6 +218,24 @@ backoff_base = 2 ## **It's only used when the provider is `kafka`**. backoff_deadline = "5mins" +# The Kafka SASL configuration. +# **It's only used when the provider is `kafka`**. +# Available SASL mechanisms: +# - `PLAIN` +# - `SCRAM-SHA-256` +# - `SCRAM-SHA-512` +# [wal.sasl] +# type = "SCRAM-SHA-512" +# username = "user_kafka" +# password = "secret" + +# The Kafka TLS configuration. +# **It's only used when the provider is `kafka`**. 
+# [wal.tls] +# server_ca_cert_path = "/path/to/server_cert" +# client_cert_path = "/path/to/client_cert" +# client_key_path = "/path/to/key" + ## Metadata storage options. [metadata_store] ## Kv file size in bytes. diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs index ccd887345c07..6d436edae03e 100644 --- a/src/common/meta/src/error.rs +++ b/src/common/meta/src/error.rs @@ -499,6 +499,13 @@ pub enum Error { error: rskafka::client::error::Error, }, + #[snafu(display("Failed to create TLS Config"))] + TlsConfig { + #[snafu(implicit)] + location: Location, + source: common_wal::error::Error, + }, + #[snafu(display("Failed to resolve Kafka broker endpoint."))] ResolveKafkaEndpoint { source: common_wal::error::Error }, @@ -714,7 +721,8 @@ impl ErrorExt for Error { | AlterLogicalTablesInvalidArguments { .. } | CreateLogicalTablesInvalidArguments { .. } | MismatchPrefix { .. } - | DelimiterNotFound { .. } => StatusCode::InvalidArguments, + | DelimiterNotFound { .. } + | TlsConfig { .. } => StatusCode::InvalidArguments, FlowNotFound { .. } => StatusCode::FlowNotFound, FlowRouteNotFound { .. } => StatusCode::Unexpected, diff --git a/src/common/meta/src/wal_options_allocator.rs b/src/common/meta/src/wal_options_allocator.rs index 5fb3db6e20eb..ba0c6f407fda 100644 --- a/src/common/meta/src/wal_options_allocator.rs +++ b/src/common/meta/src/wal_options_allocator.rs @@ -123,7 +123,7 @@ pub fn prepare_wal_options( #[cfg(test)] mod tests { - use common_wal::config::kafka::common::KafkaTopicConfig; + use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use common_wal::config::kafka::MetasrvKafkaConfig; use common_wal::test_util::run_test_with_kafka_wal; @@ -166,7 +166,10 @@ mod tests { ..Default::default() }; let config = MetasrvKafkaConfig { - broker_endpoints, + connection: KafkaConnectionConfig { + broker_endpoints, + ..Default::default() + }, kafka_topic, ..Default::default() }; diff --git a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs index ec88e37cd14d..060f82d8d71e 100644 --- a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs +++ b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs @@ -30,7 +30,7 @@ use snafu::{ensure, ResultExt}; use crate::error::{ BuildKafkaClientSnafu, BuildKafkaCtrlClientSnafu, BuildKafkaPartitionClientSnafu, CreateKafkaWalTopicSnafu, DecodeJsonSnafu, EncodeJsonSnafu, InvalidNumTopicsSnafu, - ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result, + ProduceRecordSnafu, ResolveKafkaEndpointSnafu, Result, TlsConfigSnafu, }; use crate::kv_backend::KvBackendRef; use crate::rpc::store::PutRequest; @@ -117,15 +117,22 @@ impl TopicManager { base: self.config.backoff.base as f64, deadline: self.config.backoff.deadline, }; - let broker_endpoints = common_wal::resolve_to_ipv4(&self.config.broker_endpoints) - .await - .context(ResolveKafkaEndpointSnafu)?; - let client = ClientBuilder::new(broker_endpoints) - .backoff_config(backoff_config) + let broker_endpoints = + common_wal::resolve_to_ipv4(&self.config.connection.broker_endpoints) + .await + .context(ResolveKafkaEndpointSnafu)?; + let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(backoff_config); + if let Some(sasl) = &self.config.connection.sasl { + builder = builder.sasl_config(sasl.config.clone().into_sasl_config()); + }; + if let Some(tls) = &self.config.connection.tls { + builder = 
builder.tls_config(tls.to_tls_config().await.context(TlsConfigSnafu)?) + }; + let client = builder .build() .await .with_context(|_| BuildKafkaClientSnafu { - broker_endpoints: self.config.broker_endpoints.clone(), + broker_endpoints: self.config.connection.broker_endpoints.clone(), })?; let control_client = client @@ -242,7 +249,7 @@ impl TopicManager { #[cfg(test)] mod tests { - use common_wal::config::kafka::common::KafkaTopicConfig; + use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use common_wal::test_util::run_test_with_kafka_wal; use super::*; @@ -289,7 +296,10 @@ mod tests { ..Default::default() }; let config = MetasrvKafkaConfig { - broker_endpoints, + connection: KafkaConnectionConfig { + broker_endpoints, + ..Default::default() + }, kafka_topic, ..Default::default() }; diff --git a/src/common/wal/Cargo.toml b/src/common/wal/Cargo.toml index a39baf438f19..0bced0dd38d8 100644 --- a/src/common/wal/Cargo.toml +++ b/src/common/wal/Cargo.toml @@ -18,6 +18,9 @@ common-telemetry.workspace = true futures-util.workspace = true humantime-serde.workspace = true rskafka.workspace = true +rustls = { version = "0.23", default-features = false, features = ["ring", "logging", "std", "tls12"] } +rustls-native-certs = "0.7" +rustls-pemfile = "2.1" serde.workspace = true serde_with.workspace = true snafu.workspace = true diff --git a/src/common/wal/src/config.rs b/src/common/wal/src/config.rs index 6edee1703c81..9bf3280c5a29 100644 --- a/src/common/wal/src/config.rs +++ b/src/common/wal/src/config.rs @@ -23,6 +23,7 @@ use crate::config::raft_engine::RaftEngineConfig; /// Wal configurations for metasrv. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] #[serde(tag = "provider", rename_all = "snake_case")] +#[allow(clippy::large_enum_variant)] pub enum MetasrvWalConfig { #[default] RaftEngine, @@ -48,7 +49,7 @@ impl From<DatanodeWalConfig> for MetasrvWalConfig { match config { DatanodeWalConfig::RaftEngine(_) => Self::RaftEngine, DatanodeWalConfig::Kafka(config) => Self::Kafka(MetasrvKafkaConfig { - broker_endpoints: config.broker_endpoints, + connection: config.connection, backoff: config.backoff, kafka_topic: config.kafka_topic, }), @@ -61,7 +62,7 @@ impl From<MetasrvWalConfig> for DatanodeWalConfig { match config { MetasrvWalConfig::RaftEngine => Self::RaftEngine(RaftEngineConfig::default()), MetasrvWalConfig::Kafka(config) => Self::Kafka(DatanodeKafkaConfig { - broker_endpoints: config.broker_endpoints, + connection: config.connection, backoff: config.backoff, kafka_topic: config.kafka_topic, ..Default::default() @@ -75,6 +76,9 @@ mod tests { use std::time::Duration; use common_base::readable_size::ReadableSize; + use kafka::common::{ + KafkaClientSasl, KafkaClientSaslConfig, KafkaClientTls, KafkaConnectionConfig, + }; use tests::kafka::common::KafkaTopicConfig; use super::*; @@ -144,12 +148,31 @@ mod tests { replication_factor = 1 create_topic_timeout = "30s" topic_name_prefix = "greptimedb_wal_topic" + [tls] + server_ca_cert_path = "/path/to/server.pem" + [sasl] + type = "SCRAM-SHA-512" + username = "hi" + password = "test" "#; // Deserialized to MetasrvWalConfig. 
let metasrv_wal_config: MetasrvWalConfig = toml::from_str(toml_str).unwrap(); let expected = MetasrvKafkaConfig { - broker_endpoints: vec!["127.0.0.1:9092".to_string()], + connection: KafkaConnectionConfig { + broker_endpoints: vec!["127.0.0.1:9092".to_string()], + sasl: Some(KafkaClientSasl { + config: KafkaClientSaslConfig::ScramSha512 { + username: "hi".to_string(), + password: "test".to_string(), + }, + }), + tls: Some(KafkaClientTls { + server_ca_cert_path: Some("/path/to/server.pem".to_string()), + client_cert_path: None, + client_key_path: None, + }), + }, backoff: BackoffConfig { init: Duration::from_millis(500), max: Duration::from_secs(10), @@ -170,7 +193,20 @@ mod tests { // Deserialized to DatanodeWalConfig. let datanode_wal_config: DatanodeWalConfig = toml::from_str(toml_str).unwrap(); let expected = DatanodeKafkaConfig { - broker_endpoints: vec!["127.0.0.1:9092".to_string()], + connection: KafkaConnectionConfig { + broker_endpoints: vec!["127.0.0.1:9092".to_string()], + sasl: Some(KafkaClientSasl { + config: KafkaClientSaslConfig::ScramSha512 { + username: "hi".to_string(), + password: "test".to_string(), + }, + }), + tls: Some(KafkaClientTls { + server_ca_cert_path: Some("/path/to/server.pem".to_string()), + client_cert_path: None, + client_key_path: None, + }), + }, max_batch_bytes: ReadableSize::mb(1), consumer_wait_timeout: Duration::from_millis(100), backoff: BackoffConfig { diff --git a/src/common/wal/src/config/kafka/common.rs b/src/common/wal/src/config/kafka/common.rs index e61823938546..f68ddfa5d8b2 100644 --- a/src/common/wal/src/config/kafka/common.rs +++ b/src/common/wal/src/config/kafka/common.rs @@ -12,16 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::io::Cursor; +use std::sync::Arc; use std::time::Duration; +use rskafka::client::{Credentials, SaslConfig}; +use rustls::{ClientConfig, RootCertStore}; use serde::{Deserialize, Serialize}; use serde_with::with_prefix; +use snafu::{OptionExt, ResultExt}; -use crate::{TopicSelectorType, TOPIC_NAME_PREFIX}; +use crate::error::{self, Result}; +use crate::{TopicSelectorType, BROKER_ENDPOINT, TOPIC_NAME_PREFIX}; with_prefix!(pub backoff_prefix "backoff_"); -/// Backoff configurations for kafka clients. +/// Backoff configurations for kafka client. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[serde(default)] pub struct BackoffConfig { @@ -49,6 +55,134 @@ impl Default for BackoffConfig { } } +/// The SASL configurations for kafka client. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct KafkaClientSasl { + #[serde(flatten)] + pub config: KafkaClientSaslConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(tag = "type", rename_all = "SCREAMING-KEBAB-CASE")] +pub enum KafkaClientSaslConfig { + Plain { + username: String, + password: String, + }, + #[serde(rename = "SCRAM-SHA-256")] + ScramSha256 { + username: String, + password: String, + }, + #[serde(rename = "SCRAM-SHA-512")] + ScramSha512 { + username: String, + password: String, + }, +} + +impl KafkaClientSaslConfig { + /// Converts to [`SaslConfig`]. 
+ pub fn into_sasl_config(self) -> SaslConfig { + match self { + KafkaClientSaslConfig::Plain { username, password } => { + SaslConfig::Plain(Credentials::new(username, password)) + } + KafkaClientSaslConfig::ScramSha256 { username, password } => { + SaslConfig::ScramSha256(Credentials::new(username, password)) + } + KafkaClientSaslConfig::ScramSha512 { username, password } => { + SaslConfig::ScramSha512(Credentials::new(username, password)) + } + } + } +} + +/// The TLS configurations for kafka client. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct KafkaClientTls { + pub server_ca_cert_path: Option<String>, + pub client_cert_path: Option<String>, + pub client_key_path: Option<String>, +} + +impl KafkaClientTls { + /// Builds the [`ClientConfig`]. + pub async fn to_tls_config(&self) -> Result<Arc<ClientConfig>> { + let builder = ClientConfig::builder(); + let mut roots = RootCertStore::empty(); + + if let Some(server_ca_cert_path) = &self.server_ca_cert_path { + let root_cert_bytes = + tokio::fs::read(&server_ca_cert_path) + .await + .context(error::ReadFileSnafu { + path: server_ca_cert_path, + })?; + let mut cursor = Cursor::new(root_cert_bytes); + for cert in rustls_pemfile::certs(&mut cursor) + .collect::<std::result::Result<Vec<_>, _>>() + .context(error::ReadCertsSnafu { + path: server_ca_cert_path, + })? + { + roots.add(cert).context(error::AddCertSnafu)?; + } + }; + roots.add_parsable_certificates( + rustls_native_certs::load_native_certs().context(error::LoadSystemCertsSnafu)?, + ); + + let builder = builder.with_root_certificates(roots); + let config = if let (Some(cert_path), Some(key_path)) = + (&self.client_cert_path, &self.client_key_path) + { + let cert_bytes = tokio::fs::read(cert_path) + .await + .context(error::ReadFileSnafu { path: cert_path })?; + let client_certs = rustls_pemfile::certs(&mut Cursor::new(cert_bytes)) + .collect::<std::result::Result<Vec<_>, _>>() + .context(error::ReadCertsSnafu { path: cert_path })?; + let key_bytes = tokio::fs::read(key_path) + .await + .context(error::ReadFileSnafu { path: key_path })?; + let client_key = rustls_pemfile::private_key(&mut Cursor::new(key_bytes)) + .context(error::ReadKeySnafu { path: key_path })? + .context(error::KeyNotFoundSnafu { path: key_path })?; + + builder + .with_client_auth_cert(client_certs, client_key) + .context(error::SetClientAuthCertSnafu)? + } else { + builder.with_no_client_auth() + }; + + Ok(Arc::new(config)) + } +} + +/// The connection configurations for kafka clients. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(default)] +pub struct KafkaConnectionConfig { + /// The broker endpoints of the Kafka cluster. + pub broker_endpoints: Vec<String>, + /// Client SASL. + pub sasl: Option<KafkaClientSasl>, + /// Client TLS config + pub tls: Option<KafkaClientTls>, +} + +impl Default for KafkaConnectionConfig { + fn default() -> Self { + Self { + broker_endpoints: vec![BROKER_ENDPOINT.to_string()], + sasl: None, + tls: None, + } + } +} + /// Topic configurations for kafka clients. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[serde(default)] diff --git a/src/common/wal/src/config/kafka/datanode.rs b/src/common/wal/src/config/kafka/datanode.rs index b01e0635f637..a1260c05effd 100644 --- a/src/common/wal/src/config/kafka/datanode.rs +++ b/src/common/wal/src/config/kafka/datanode.rs @@ -17,15 +17,16 @@ use std::time::Duration; use common_base::readable_size::ReadableSize; use serde::{Deserialize, Serialize}; +use super::common::KafkaConnectionConfig; use crate::config::kafka::common::{backoff_prefix, BackoffConfig, KafkaTopicConfig}; -use crate::BROKER_ENDPOINT; /// Kafka wal configurations for datanode. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[serde(default)] pub struct DatanodeKafkaConfig { - /// The broker endpoints of the Kafka cluster. - pub broker_endpoints: Vec<String>, + /// The kafka connection config. + #[serde(flatten)] + pub connection: KafkaConnectionConfig, /// TODO(weny): Remove the alias once we release v0.9. /// The max size of a single producer batch. #[serde(alias = "max_batch_size")] @@ -44,7 +45,7 @@ pub struct DatanodeKafkaConfig { impl Default for DatanodeKafkaConfig { fn default() -> Self { Self { - broker_endpoints: vec![BROKER_ENDPOINT.to_string()], + connection: KafkaConnectionConfig::default(), // Warning: Kafka has a default limit of 1MB per message in a topic. max_batch_bytes: ReadableSize::mb(1), consumer_wait_timeout: Duration::from_millis(100), diff --git a/src/common/wal/src/config/kafka/metasrv.rs b/src/common/wal/src/config/kafka/metasrv.rs index 519992e17579..f61047315cda 100644 --- a/src/common/wal/src/config/kafka/metasrv.rs +++ b/src/common/wal/src/config/kafka/metasrv.rs @@ -14,15 +14,16 @@ use serde::{Deserialize, Serialize}; +use super::common::KafkaConnectionConfig; use crate::config::kafka::common::{backoff_prefix, BackoffConfig, KafkaTopicConfig}; -use crate::BROKER_ENDPOINT; /// Kafka wal configurations for metasrv. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] #[serde(default)] pub struct MetasrvKafkaConfig { - /// The broker endpoints of the Kafka cluster. - pub broker_endpoints: Vec<String>, + /// The kafka connection config. + #[serde(flatten)] + pub connection: KafkaConnectionConfig, /// The backoff config. #[serde(flatten, with = "backoff_prefix")] pub backoff: BackoffConfig, @@ -30,14 +31,3 @@ pub struct MetasrvKafkaConfig { #[serde(flatten)] pub kafka_topic: KafkaTopicConfig, } - -impl Default for MetasrvKafkaConfig { - fn default() -> Self { - let broker_endpoints = vec![BROKER_ENDPOINT.to_string()]; - Self { - broker_endpoints, - backoff: BackoffConfig::default(), - kafka_topic: KafkaTopicConfig::default(), - } - } -} diff --git a/src/common/wal/src/error.rs b/src/common/wal/src/error.rs index 147eeb293da1..d4427c5ba204 100644 --- a/src/common/wal/src/error.rs +++ b/src/common/wal/src/error.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use common_macro::stack_trace_debug; -use snafu::Snafu; +use snafu::{Location, Snafu}; #[derive(Snafu)] #[snafu(visibility(pub))] @@ -24,10 +24,74 @@ pub enum Error { broker_endpoint: String, #[snafu(source)] error: std::io::Error, + #[snafu(implicit)] + location: Location, }, #[snafu(display("Failed to find ipv4 endpoint: {:?}", broker_endpoint))] - EndpointIPV4NotFound { broker_endpoint: String }, + EndpointIPV4NotFound { + broker_endpoint: String, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to read file, path: {}", path))] + ReadFile { + path: String, + #[snafu(source)] + error: std::io::Error, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to add root cert"))] + AddCert { + #[snafu(source)] + error: rustls::Error, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to read cert, path: {}", path))] + ReadCerts { + path: String, + #[snafu(source)] + error: std::io::Error, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to read key, path: {}", path))] + ReadKey { + path: String, + #[snafu(source)] + error: std::io::Error, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to parse key, path: {}", path))] + KeyNotFound { + path: String, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to set client auth cert"))] + SetClientAuthCert { + #[snafu(source)] + error: rustls::Error, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Failed to load ca certs from system"))] + LoadSystemCerts { + #[snafu(source)] + error: std::io::Error, + #[snafu(implicit)] + location: Location, + }, } pub type Result<T> = std::result::Result<T, Error>; diff --git a/src/common/wal/src/lib.rs b/src/common/wal/src/lib.rs index 086846ab3960..659a045f57eb 100644 --- a/src/common/wal/src/lib.rs +++ b/src/common/wal/src/lib.rs @@ -61,6 +61,9 @@ async fn resolve_to_ipv4_one<T: AsRef<str>>(endpoint: T) -> Result<String> { mod tests { use std::assert_matches::assert_matches; + use common_telemetry::warn; + use rskafka::client::{Credentials, SaslConfig}; + use super::*; use crate::error::Error; @@ -86,4 +89,44 @@ mod tests { let got = resolve_to_ipv4_one(host).await; assert_matches!(got.unwrap_err(), Error::ResolveEndpoint { .. 
}); } + + #[tokio::test] + async fn test_sasl() { + common_telemetry::init_default_ut_logging(); + let Ok(broker_endpoints) = std::env::var("GT_KAFKA_SASL_ENDPOINTS") else { + warn!("The endpoints is empty, skipping the test 'test_sasl'"); + return; + }; + let broker_endpoints = broker_endpoints + .split(',') + .map(|s| s.trim().to_string()) + .collect::<Vec<_>>(); + + let username = "user_kafka"; + let password = "secret"; + let _ = rskafka::client::ClientBuilder::new(broker_endpoints.clone()) + .sasl_config(SaslConfig::Plain(Credentials::new( + username.to_string(), + password.to_string(), + ))) + .build() + .await + .unwrap(); + let _ = rskafka::client::ClientBuilder::new(broker_endpoints.clone()) + .sasl_config(SaslConfig::ScramSha256(Credentials::new( + username.to_string(), + password.to_string(), + ))) + .build() + .await + .unwrap(); + let _ = rskafka::client::ClientBuilder::new(broker_endpoints) + .sasl_config(SaslConfig::ScramSha512(Credentials::new( + username.to_string(), + password.to_string(), + ))) + .build() + .await + .unwrap(); + } } diff --git a/src/log-store/src/error.rs b/src/log-store/src/error.rs index 4918bdf3567b..222725d06ac7 100644 --- a/src/log-store/src/error.rs +++ b/src/log-store/src/error.rs @@ -27,6 +27,13 @@ use crate::kafka::producer::ProduceRequest; #[snafu(visibility(pub))] #[stack_trace_debug] pub enum Error { + #[snafu(display("Failed to create TLS Config"))] + TlsConfig { + #[snafu(implicit)] + location: Location, + source: common_wal::error::Error, + }, + #[snafu(display("Invalid provider type, expected: {}, actual: {}", expected, actual))] InvalidProvider { #[snafu(implicit)] diff --git a/src/log-store/src/kafka/client_manager.rs b/src/log-store/src/kafka/client_manager.rs index 089f05f008c4..64523e6d0b08 100644 --- a/src/log-store/src/kafka/client_manager.rs +++ b/src/log-store/src/kafka/client_manager.rs @@ -25,7 +25,7 @@ use tokio::sync::{Mutex, RwLock}; use super::producer::OrderedBatchProducer; use crate::error::{ - BuildClientSnafu, BuildPartitionClientSnafu, ResolveKafkaEndpointSnafu, Result, + BuildClientSnafu, BuildPartitionClientSnafu, ResolveKafkaEndpointSnafu, Result, TlsConfigSnafu, }; use crate::kafka::producer::OrderedBatchProducerRef; @@ -80,16 +80,20 @@ impl ClientManager { base: config.backoff.base as f64, deadline: config.backoff.deadline, }; - let broker_endpoints = common_wal::resolve_to_ipv4(&config.broker_endpoints) + let broker_endpoints = common_wal::resolve_to_ipv4(&config.connection.broker_endpoints) .await .context(ResolveKafkaEndpointSnafu)?; - let client = ClientBuilder::new(broker_endpoints) - .backoff_config(backoff_config) - .build() - .await - .with_context(|_| BuildClientSnafu { - broker_endpoints: config.broker_endpoints.clone(), - })?; + let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(backoff_config); + if let Some(sasl) = &config.connection.sasl { + builder = builder.sasl_config(sasl.config.clone().into_sasl_config()); + }; + if let Some(tls) = &config.connection.tls { + builder = builder.tls_config(tls.to_tls_config().await.context(TlsConfigSnafu)?) 
+ }; + + let client = builder.build().await.with_context(|_| BuildClientSnafu { + broker_endpoints: config.connection.broker_endpoints.clone(), + })?; Ok(Self { client, @@ -161,6 +165,7 @@ impl ClientManager { #[cfg(test)] mod tests { + use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::test_util::run_test_with_kafka_wal; use tokio::sync::Barrier; @@ -206,7 +211,10 @@ mod tests { .await; let config = DatanodeKafkaConfig { - broker_endpoints, + connection: KafkaConnectionConfig { + broker_endpoints, + ..Default::default() + }, ..Default::default() }; let manager = ClientManager::try_new(&config).await.unwrap(); diff --git a/src/log-store/src/kafka/log_store.rs b/src/log-store/src/kafka/log_store.rs index 19518575315e..23fb19461789 100644 --- a/src/log-store/src/kafka/log_store.rs +++ b/src/log-store/src/kafka/log_store.rs @@ -360,6 +360,7 @@ mod tests { use common_base::readable_size::ReadableSize; use common_telemetry::info; use common_telemetry::tracing::warn; + use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::config::kafka::DatanodeKafkaConfig; use futures::TryStreamExt; use rand::prelude::SliceRandom; @@ -461,7 +462,10 @@ mod tests { .map(|s| s.trim().to_string()) .collect::<Vec<_>>(); let config = DatanodeKafkaConfig { - broker_endpoints, + connection: KafkaConnectionConfig { + broker_endpoints, + ..Default::default() + }, max_batch_bytes: ReadableSize::kb(32), ..Default::default() }; @@ -530,7 +534,10 @@ mod tests { .map(|s| s.trim().to_string()) .collect::<Vec<_>>(); let config = DatanodeKafkaConfig { - broker_endpoints, + connection: KafkaConnectionConfig { + broker_endpoints, + ..Default::default() + }, max_batch_bytes: ReadableSize::kb(8), ..Default::default() }; diff --git a/src/log-store/src/test_util/log_store_util.rs b/src/log-store/src/test_util/log_store_util.rs index dacdf5088227..f78b5a965d0c 100644 --- a/src/log-store/src/test_util/log_store_util.rs +++ b/src/log-store/src/test_util/log_store_util.rs @@ -15,6 +15,7 @@ use std::path::Path; use common_base::readable_size::ReadableSize; +use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::config::kafka::DatanodeKafkaConfig; use common_wal::config::raft_engine::RaftEngineConfig; @@ -34,7 +35,10 @@ pub async fn create_tmp_local_file_log_store<P: AsRef<Path>>(path: P) -> RaftEng /// Create a [KafkaLogStore]. 
pub async fn create_kafka_log_store(broker_endpoints: Vec<String>) -> KafkaLogStore { KafkaLogStore::try_new(&DatanodeKafkaConfig { - broker_endpoints, + connection: KafkaConnectionConfig { + broker_endpoints, + ..Default::default() + }, ..Default::default() }) .await diff --git a/tests-integration/fixtures/kafka/docker-compose-standalone.yml b/tests-integration/fixtures/kafka/docker-compose-standalone.yml index 9c257418a5d8..4208fe3f67ed 100644 --- a/tests-integration/fixtures/kafka/docker-compose-standalone.yml +++ b/tests-integration/fixtures/kafka/docker-compose-standalone.yml @@ -1,21 +1,28 @@ version: '3.8' services: + zookeeper: + image: bitnami/zookeeper:3.7 + ports: + - '2181:2181' + environment: + - ALLOW_ANONYMOUS_LOGIN=yes kafka: image: bitnami/kafka:3.6.0 container_name: kafka ports: - 9092:9092 + - 9093:9093 environment: # KRaft settings - KAFKA_KRAFT_CLUSTER_ID: Kmp-xkTnSf-WWXhWmiorDg - KAFKA_ENABLE_KRAFT: "yes" KAFKA_CFG_NODE_ID: "1" KAFKA_CFG_PROCESS_ROLES: broker,controller KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: [email protected]:2181 # Listeners - KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092 + KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092,SECURE://localhost:9093 KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT - KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:2181 + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SECURE:SASL_PLAINTEXT + KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:2181,SECURE://:9093 ALLOW_PLAINTEXT_LISTENER: "yes" KAFKA_BROKER_ID: "1" + KAFKA_CLIENT_USERS: "user_kafka" + KAFKA_CLIENT_PASSWORDS: "secret" diff --git a/tests-integration/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs index 491a93086953..0f93766ec7e9 100644 --- a/tests-integration/src/tests/test_util.rs +++ b/tests-integration/src/tests/test_util.rs @@ -21,7 +21,7 @@ use common_query::Output; use common_recordbatch::util; use common_telemetry::warn; use common_test_util::find_workspace_path; -use common_wal::config::kafka::common::KafkaTopicConfig; +use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig}; use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig}; use frontend::instance::Instance; @@ -227,11 +227,17 @@ pub(crate) async fn standalone_with_kafka_wal() -> Option<Box<dyn RebuildableMoc let test_name = uuid::Uuid::new_v4().to_string(); let builder = GreptimeDbStandaloneBuilder::new(&test_name) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { topic_name_prefix: test_name.to_string(), num_topics: 3, @@ -260,11 +266,17 @@ pub(crate) async fn distributed_with_kafka_wal() -> Option<Box<dyn RebuildableMo let builder = GreptimeDbClusterBuilder::new(&test_name) .await .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() 
})) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { topic_name_prefix: test_name.to_string(), num_topics: 3, diff --git a/tests-integration/tests/region_migration.rs b/tests-integration/tests/region_migration.rs index 73316685c60d..056aa2ab7fde 100644 --- a/tests-integration/tests/region_migration.rs +++ b/tests-integration/tests/region_migration.rs @@ -23,7 +23,7 @@ use common_recordbatch::RecordBatches; use common_telemetry::info; use common_test_util::recordbatch::check_output_stream; use common_test_util::temp_dir::create_temp_dir; -use common_wal::config::kafka::common::KafkaTopicConfig; +use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig}; use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig}; use datatypes::prelude::ScalarVector; @@ -114,11 +114,17 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(), @@ -246,11 +252,17 @@ pub async fn test_metric_table_region_migration_by_sql( .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(), @@ -371,11 +383,17 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(), @@ -495,11 +513,17 @@ pub async fn test_region_migration_multiple_regions( .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) 
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(), @@ -634,11 +658,17 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(), @@ -768,11 +798,17 @@ pub async fn test_region_migration_incorrect_from_peer( .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(), @@ -845,11 +881,17 @@ pub async fn test_region_migration_incorrect_region_id( .with_datanodes(datanodes as u32) .with_store_config(store_config) .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig { - broker_endpoints: endpoints.clone(), + connection: KafkaConnectionConfig { + broker_endpoints: endpoints.clone(), + ..Default::default() + }, ..Default::default() })) .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig { - broker_endpoints: endpoints, + connection: KafkaConnectionConfig { + broker_endpoints: endpoints, + ..Default::default() + }, kafka_topic: KafkaTopicConfig { num_topics: 3, topic_name_prefix: Uuid::new_v4().to_string(),
feat
add SASL and TLS config for Kafka client (#4536)
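The commit above threads new `[wal.sasl]` and `[wal.tls]` tables from the example TOML configs into the Kafka client builder. As a quick orientation, here is a minimal, self-contained sketch of how a `type`-tagged SASL table like the one in those config examples deserializes with serde; the type and field names are simplified stand-ins for the real `KafkaClientSasl`/`KafkaClientSaslConfig` structs, and the snippet assumes the `serde` (with the `derive` feature) and `toml` crates.

```rust
use serde::Deserialize;

// Simplified stand-in for the SASL section of the WAL config shown in the diff.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(tag = "type")]
enum SaslMechanism {
    #[serde(rename = "PLAIN")]
    Plain { username: String, password: String },
    #[serde(rename = "SCRAM-SHA-256")]
    ScramSha256 { username: String, password: String },
    #[serde(rename = "SCRAM-SHA-512")]
    ScramSha512 { username: String, password: String },
}

#[derive(Debug, Deserialize)]
struct WalSection {
    // Optional, so omitting the table keeps SASL disabled.
    sasl: Option<SaslMechanism>,
}

fn main() {
    let toml_str = r#"
        [sasl]
        type = "SCRAM-SHA-512"
        username = "user_kafka"
        password = "secret"
    "#;
    let wal: WalSection = toml::from_str(toml_str).unwrap();
    assert_eq!(
        wal.sasl,
        Some(SaslMechanism::ScramSha512 {
            username: "user_kafka".to_string(),
            password: "secret".to_string(),
        })
    );
}
```

In the actual change this enum is additionally flattened into a `KafkaClientSasl` wrapper, paired with an optional TLS table, and then mapped onto rskafka's `SaslConfig` when the client is built, as the diff shows.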
bbbeaa709bfeda73f1da44e8971c19262fcfdf0e
2023-08-02 12:03:10
Ruihang Xia
fix(deps): update greptime-proto rev to the one after merge (#2063)
false
diff --git a/Cargo.toml b/Cargo.toml
index fcae106b3930..b9af4a696a05 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -74,7 +74,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git
 derive_builder = "0.12"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3d8ac534a0c8fd1c6ec66d129345b44c95665ebc" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "eeae2d0dfa8ee320a7b9e987b4631a6c1c732ebd" }
 itertools = "0.10"
 lazy_static = "1.4"
 once_cell = "1.18"
fix
update greptime-proto rev to the one after merge (#2063)
6f07d6915515a2b7634dc2b2210c9037a1324114
2024-01-15 14:38:07
Zhenchi
feat(mito): enable inverted index (#3158)
false
diff --git a/config/datanode.example.toml b/config/datanode.example.toml index 6ed277cf1856..62502cabe53c 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -116,6 +116,25 @@ parallel_scan_channel_size = 32 # Whether to allow stale WAL entries read during replay. allow_stale_entries = false +[region_engine.mito.inverted_index] +# Whether to create the index on flush. +# - "auto": automatically +# - "disable": never +create_on_flush = "auto" +# Whether to create the index on compaction. +# - "auto": automatically +# - "disable": never +create_on_compaction = "auto" +# Whether to apply the index on query +# - "auto": automatically +# - "disable": never +apply_on_query = "auto" +# Memory threshold for performing an external sort during index creation. +# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. +mem_threshold_on_create = "64MB" +# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). +intermediate_path = "" + # Log options, see `standalone.example.toml` # [logging] # dir = "/tmp/greptimedb/logs" diff --git a/config/standalone.example.toml b/config/standalone.example.toml index 565c94cdb019..a49ffa835e9a 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -216,6 +216,25 @@ parallel_scan_channel_size = 32 # Whether to allow stale WAL entries read during replay. allow_stale_entries = false +[region_engine.mito.inverted_index] +# Whether to create the index on flush. +# - "auto": automatically +# - "disable": never +create_on_flush = "auto" +# Whether to create the index on compaction. +# - "auto": automatically +# - "disable": never +create_on_compaction = "auto" +# Whether to apply the index on query +# - "auto": automatically +# - "disable": never +apply_on_query = "auto" +# Memory threshold for performing an external sort during index creation. +# Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. +mem_threshold_on_create = "64M" +# File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). +intermediate_path = "" + # Log options # [logging] # Specify logs directory. diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs index 9f469f0f484b..ac8d3a48b607 100644 --- a/src/datanode/src/config.rs +++ b/src/datanode/src/config.rs @@ -284,6 +284,7 @@ impl DatanodeOptions { } } +#[allow(clippy::large_enum_variant)] #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub enum RegionEngineConfig { #[serde(rename = "mito")] diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index 2d40ad81245e..644460130992 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -41,7 +41,7 @@ use metric_engine::engine::MetricEngine; use mito2::config::MitoConfig; use mito2::engine::MitoEngine; use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef}; -use object_store::util::{join_dir, normalize_dir}; +use object_store::util::normalize_dir; use query::QueryEngineFactory; use servers::export_metrics::ExportMetricsTask; use servers::server::{start_server, ServerHandlers}; @@ -374,19 +374,11 @@ impl DatanodeBuilder { async fn build_mito_engine( opts: &DatanodeOptions, object_store_manager: ObjectStoreManagerRef, - mut config: MitoConfig, + config: MitoConfig, ) -> Result<MitoEngine> { - // Sets write cache path if it is empty. 
- if config.experimental_write_cache_path.is_empty() { - config.experimental_write_cache_path = join_dir(&opts.storage.data_home, "write_cache"); - info!( - "Sets write cache path to {}", - config.experimental_write_cache_path - ); - } - let mito_engine = match &opts.wal { WalConfig::RaftEngine(raft_engine_config) => MitoEngine::new( + &opts.storage.data_home, config, Self::build_raft_engine_log_store(&opts.storage.data_home, raft_engine_config) .await?, @@ -394,7 +386,9 @@ impl DatanodeBuilder { ) .await .context(BuildMitoEngineSnafu)?, + WalConfig::Kafka(kafka_config) => MitoEngine::new( + &opts.storage.data_home, config, Self::build_kafka_log_store(kafka_config).await?, object_store_manager, diff --git a/src/index/src/inverted_index/create.rs b/src/index/src/inverted_index/create.rs index e17f987b5c67..15674d696cd6 100644 --- a/src/index/src/inverted_index/create.rs +++ b/src/index/src/inverted_index/create.rs @@ -23,7 +23,7 @@ use crate::inverted_index::BytesRef; /// `InvertedIndexCreator` provides functionality to construct an inverted index #[async_trait] -pub trait InvertedIndexCreator { +pub trait InvertedIndexCreator: Send { /// Adds a value to the named index. A `None` value represents an absence of data (null) /// /// - `index_name`: Identifier for the index being built diff --git a/src/index/src/lib.rs b/src/index/src/lib.rs index 296efb315d09..e7f448c398ef 100644 --- a/src/index/src/lib.rs +++ b/src/index/src/lib.rs @@ -13,7 +13,5 @@ // limitations under the License. #![feature(iter_partition_in_place)] -// TODO(zhongzc): remove once further code is added -#![allow(dead_code)] pub mod inverted_index; diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs index ecbeea2f0b0b..dddf5e7b3f72 100644 --- a/src/mito2/src/access_layer.rs +++ b/src/mito2/src/access_layer.rs @@ -25,6 +25,8 @@ use crate::cache::CacheManagerRef; use crate::error::{CleanDirSnafu, DeleteIndexSnafu, DeleteSstSnafu, OpenDalSnafu, Result}; use crate::read::Source; use crate::sst::file::{FileHandle, FileId, FileMeta}; +use crate::sst::index::intermediate::IntermediateManager; +use crate::sst::index::IndexerBuilder; use crate::sst::location; use crate::sst::parquet::reader::ParquetReaderBuilder; use crate::sst::parquet::writer::ParquetWriter; @@ -37,6 +39,8 @@ pub struct AccessLayer { region_dir: String, /// Target object store. object_store: ObjectStore, + /// Intermediate manager for inverted index. + intermediate_manager: IntermediateManager, } impl std::fmt::Debug for AccessLayer { @@ -49,10 +53,15 @@ impl std::fmt::Debug for AccessLayer { impl AccessLayer { /// Returns a new [AccessLayer] for specific `region_dir`. - pub fn new(region_dir: impl Into<String>, object_store: ObjectStore) -> AccessLayer { + pub fn new( + region_dir: impl Into<String>, + object_store: ObjectStore, + intermediate_manager: IntermediateManager, + ) -> AccessLayer { AccessLayer { region_dir: region_dir.into(), object_store, + intermediate_manager, } } @@ -105,16 +114,15 @@ impl AccessLayer { let file_path = location::sst_file_path(&self.region_dir, request.file_id); let index_file_path = location::index_file_path(&self.region_dir, request.file_id); let region_id = request.metadata.region_id; + let file_id = request.file_id; + let cache_manager = request.cache_manager.clone(); - let sst_info = if let Some(write_cache) = request.cache_manager.write_cache() { + let sst_info = if let Some(write_cache) = cache_manager.write_cache() { // Write to the write cache. 
write_cache .write_and_upload_sst( + request, SstUploadRequest { - file_id: request.file_id, - metadata: request.metadata, - source: request.source, - storage: request.storage, upload_path: file_path, index_upload_path: index_file_path, remote_store: self.object_store.clone(), @@ -124,19 +132,30 @@ impl AccessLayer { .await? } else { // Write cache is disabled. - let mut writer = - ParquetWriter::new(file_path, request.metadata, self.object_store.clone()); + let indexer = IndexerBuilder { + create_inverted_index: request.create_inverted_index, + mem_threshold_index_create: request.mem_threshold_index_create, + file_id, + file_path: index_file_path, + metadata: &request.metadata, + row_group_size: write_opts.row_group_size, + object_store: self.object_store.clone(), + intermediate_manager: self.intermediate_manager.clone(), + } + .build(); + let mut writer = ParquetWriter::new( + file_path, + request.metadata, + self.object_store.clone(), + indexer, + ); writer.write_all(request.source, write_opts).await? }; // Put parquet metadata to cache manager. if let Some(sst_info) = &sst_info { if let Some(parquet_metadata) = &sst_info.file_metadata { - request.cache_manager.put_parquet_meta_data( - region_id, - request.file_id, - parquet_metadata.clone(), - ) + cache_manager.put_parquet_meta_data(region_id, file_id, parquet_metadata.clone()) } } @@ -150,7 +169,12 @@ pub(crate) struct SstWriteRequest { pub(crate) metadata: RegionMetadataRef, pub(crate) source: Source, pub(crate) cache_manager: CacheManagerRef, + #[allow(dead_code)] pub(crate) storage: Option<String>, + /// Whether to create inverted index. + pub(crate) create_inverted_index: bool, + /// The threshold of memory size to create inverted index. + pub(crate) mem_threshold_index_create: Option<usize>, } /// Creates a fs object store with atomic write dir. diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs index 5871853b99f6..7a23cda47180 100644 --- a/src/mito2/src/cache/write_cache.rs +++ b/src/mito2/src/cache/write_cache.rs @@ -27,12 +27,14 @@ use snafu::ResultExt; use store_api::metadata::RegionMetadataRef; use store_api::storage::RegionId; -use crate::access_layer::new_fs_object_store; +use crate::access_layer::{new_fs_object_store, SstWriteRequest}; use crate::cache::file_cache::{FileCache, FileCacheRef, FileType, IndexKey, IndexValue}; use crate::error::{self, Result}; use crate::metrics::{FLUSH_ELAPSED, UPLOAD_BYTES_TOTAL}; use crate::read::Source; use crate::sst::file::FileId; +use crate::sst::index::intermediate::IntermediateManager; +use crate::sst::index::{Indexer, IndexerBuilder}; use crate::sst::parquet::writer::ParquetWriter; use crate::sst::parquet::{SstInfo, WriteOptions}; use crate::sst::DEFAULT_WRITE_BUFFER_SIZE; @@ -45,6 +47,8 @@ pub struct WriteCache { file_cache: FileCacheRef, /// Object store manager. object_store_manager: ObjectStoreManagerRef, + /// Intermediate manager for inverted index. 
+ intermediate_manager: IntermediateManager, } pub type WriteCacheRef = Arc<WriteCache>; @@ -56,6 +60,7 @@ impl WriteCache { local_store: ObjectStore, object_store_manager: ObjectStoreManagerRef, cache_capacity: ReadableSize, + intermediate_manager: IntermediateManager, ) -> Result<Self> { let file_cache = FileCache::new(local_store, cache_capacity); file_cache.recover().await?; @@ -63,6 +68,7 @@ impl WriteCache { Ok(Self { file_cache: Arc::new(file_cache), object_store_manager, + intermediate_manager, }) } @@ -71,11 +77,18 @@ impl WriteCache { cache_dir: &str, object_store_manager: ObjectStoreManagerRef, cache_capacity: ReadableSize, + intermediate_manager: IntermediateManager, ) -> Result<Self> { info!("Init write cache on {cache_dir}, capacity: {cache_capacity}"); let local_store = new_fs_object_store(cache_dir).await?; - Self::new(local_store, object_store_manager, cache_capacity).await + Self::new( + local_store, + object_store_manager, + cache_capacity, + intermediate_manager, + ) + .await } /// Returns the file cache of the write cache. @@ -84,27 +97,42 @@ impl WriteCache { } /// Writes SST to the cache and then uploads it to the remote object store. - pub async fn write_and_upload_sst( + pub(crate) async fn write_and_upload_sst( &self, - request: SstUploadRequest, + write_request: SstWriteRequest, + upload_request: SstUploadRequest, write_opts: &WriteOptions, ) -> Result<Option<SstInfo>> { let timer = FLUSH_ELAPSED .with_label_values(&["write_sst"]) .start_timer(); - let region_id = request.metadata.region_id; - let file_id = request.file_id; + let region_id = write_request.metadata.region_id; + let file_id = write_request.file_id; let parquet_key = IndexKey::new(region_id, file_id, FileType::Parquet); + let puffin_key = IndexKey::new(region_id, file_id, FileType::Puffin); + + let indexer = IndexerBuilder { + create_inverted_index: write_request.create_inverted_index, + mem_threshold_index_create: write_request.mem_threshold_index_create, + file_id, + file_path: self.file_cache.cache_file_path(puffin_key), + metadata: &write_request.metadata, + row_group_size: write_opts.row_group_size, + object_store: self.file_cache.local_store(), + intermediate_manager: self.intermediate_manager.clone(), + } + .build(); // Write to FileCache. let mut writer = ParquetWriter::new( self.file_cache.cache_file_path(parquet_key), - request.metadata, + write_request.metadata, self.file_cache.local_store(), + indexer, ); - let sst_info = writer.write_all(request.source, write_opts).await?; + let sst_info = writer.write_all(write_request.source, write_opts).await?; timer.stop_and_record(); @@ -114,13 +142,13 @@ impl WriteCache { return Ok(None); }; - let parquet_path = &request.upload_path; - let remote_store = &request.remote_store; + let parquet_path = &upload_request.upload_path; + let remote_store = &upload_request.remote_store; self.upload(parquet_key, parquet_path, remote_store).await?; if sst_info.inverted_index_available { let puffin_key = IndexKey::new(region_id, file_id, FileType::Puffin); - let puffin_path = &request.index_upload_path; + let puffin_path = &upload_request.index_upload_path; self.upload(puffin_key, puffin_path, remote_store).await?; } @@ -193,10 +221,6 @@ impl WriteCache { /// Request to write and upload a SST. pub struct SstUploadRequest { - pub file_id: FileId, - pub metadata: RegionMetadataRef, - pub source: Source, - pub storage: Option<String>, /// Path to upload the file. pub upload_path: String, /// Path to upload the index file. 
@@ -212,6 +236,7 @@ mod tests { use common_test_util::temp_dir::create_temp_dir; use object_store::manager::ObjectStoreManager; use object_store::services::Fs; + use object_store::util::join_dir; use object_store::ObjectStore; use store_api::storage::RegionId; @@ -230,10 +255,14 @@ mod tests { // TODO(QuenKar): maybe find a way to create some object server for testing, // and now just use local file system to mock. let mut env = TestEnv::new(); + let data_home = env.data_home().display().to_string(); let mock_store = env.init_object_store_manager(); let file_id = FileId::random(); let upload_path = sst_file_path("test", file_id); let index_upload_path = index_file_path("test", file_id); + let intm_mgr = IntermediateManager::init_fs(join_dir(&data_home, "intm")) + .await + .unwrap(); // Create WriteCache let local_dir = create_temp_dir(""); @@ -243,6 +272,7 @@ mod tests { local_store.clone(), object_store_manager, ReadableSize::mb(10), + intm_mgr, ) .await .unwrap(); @@ -256,13 +286,19 @@ mod tests { new_batch_by_range(&["b", "h"], 100, 200), ]); - let request = SstUploadRequest { + let write_request = SstWriteRequest { file_id, metadata, source, storage: None, + create_inverted_index: true, + mem_threshold_index_create: None, + cache_manager: Default::default(), + }; + + let request = SstUploadRequest { upload_path: upload_path.clone(), - index_upload_path, + index_upload_path: index_upload_path.clone(), remote_store: mock_store.clone(), }; @@ -273,7 +309,7 @@ mod tests { // Write to cache and upload sst to mock remote store let sst_info = write_cache - .write_and_upload_sst(request, &write_opts) + .write_and_upload_sst(write_request, request, &write_opts) .await .unwrap() .unwrap(); @@ -289,5 +325,16 @@ mod tests { .await .unwrap(); assert_eq!(remote_data, cache_data); + + // Check write cache contains the index key + let index_key = IndexKey::new(region_id, file_id, FileType::Puffin); + assert!(write_cache.file_cache.contains_key(&index_key)); + + let remote_index_data = mock_store.read(&index_upload_path).await.unwrap(); + let cache_index_data = local_store + .read(&write_cache.file_cache.cache_file_path(index_key)) + .await + .unwrap(); + assert_eq!(remote_index_data, cache_index_data); } } diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs index 0ddcec61d0f2..000a6e2a88c0 100644 --- a/src/mito2/src/compaction.rs +++ b/src/mito2/src/compaction.rs @@ -21,7 +21,6 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Instant; -use common_base::readable_size::ReadableSize; use common_telemetry::{debug, error}; pub use picker::CompactionPickerRef; use snafu::ResultExt; @@ -44,6 +43,7 @@ use crate::sst::file_purger::FilePurgerRef; /// Region compaction request. pub struct CompactionRequest { + pub(crate) engine_config: Arc<MitoConfig>, pub(crate) current_version: VersionRef, pub(crate) access_layer: AccessLayerRef, /// Sender to send notification to the region worker. @@ -53,8 +53,6 @@ pub struct CompactionRequest { pub(crate) file_purger: FilePurgerRef, /// Start time of compaction task. pub(crate) start_time: Instant, - /// Buffering threshold while writing SST files. 
- pub(crate) sst_write_buffer_size: ReadableSize, pub(crate) cache_manager: CacheManagerRef, } @@ -331,13 +329,13 @@ impl CompactionStatus { let current_version = self.version_control.current().version; let start_time = Instant::now(); let mut req = CompactionRequest { + engine_config, current_version, access_layer: self.access_layer.clone(), request_sender: request_sender.clone(), waiters: Vec::new(), file_purger: self.file_purger.clone(), start_time, - sst_write_buffer_size: engine_config.sst_write_buffer_size, cache_manager, }; @@ -363,7 +361,7 @@ mod tests { #[tokio::test] async fn test_schedule_empty() { - let env = SchedulerEnv::new(); + let env = SchedulerEnv::new().await; let (tx, _rx) = mpsc::channel(4); let mut scheduler = env.mock_compaction_scheduler(tx); let mut builder = VersionControlBuilder::new(); @@ -432,7 +430,7 @@ mod tests { #[tokio::test] async fn test_schedule_on_finished() { let job_scheduler = Arc::new(VecScheduler::default()); - let env = SchedulerEnv::new().scheduler(job_scheduler.clone()); + let env = SchedulerEnv::new().await.scheduler(job_scheduler.clone()); let (tx, _rx) = mpsc::channel(4); let mut scheduler = env.mock_compaction_scheduler(tx); let mut builder = VersionControlBuilder::new(); diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs index 3bc96dc2dda8..b0adc897b6aa 100644 --- a/src/mito2/src/compaction/twcs.rs +++ b/src/mito2/src/compaction/twcs.rs @@ -17,7 +17,6 @@ use std::fmt::{Debug, Formatter}; use std::sync::Arc; use std::time::{Duration, Instant}; -use common_base::readable_size::ReadableSize; use common_telemetry::{debug, error, info}; use common_time::timestamp::TimeUnit; use common_time::timestamp_millis::BucketAligned; @@ -32,6 +31,7 @@ use crate::access_layer::{AccessLayerRef, SstWriteRequest}; use crate::cache::CacheManagerRef; use crate::compaction::picker::{CompactionTask, Picker}; use crate::compaction::CompactionRequest; +use crate::config::MitoConfig; use crate::error::{self, CompactRegionSnafu}; use crate::metrics::{COMPACTION_FAILURE_COUNT, COMPACTION_STAGE_ELAPSED}; use crate::read::projection::ProjectionMapper; @@ -123,13 +123,13 @@ impl TwcsPicker { impl Picker for TwcsPicker { fn pick(&self, req: CompactionRequest) -> Option<Box<dyn CompactionTask>> { let CompactionRequest { + engine_config, current_version, access_layer, request_sender, waiters, file_purger, start_time, - sst_write_buffer_size, cache_manager, } = req; @@ -173,12 +173,12 @@ impl Picker for TwcsPicker { return None; } let task = TwcsCompactionTask { + engine_config, region_id, metadata: region_metadata, sst_layer: access_layer, outputs, expired_ssts, - sst_write_buffer_size, compaction_time_window: Some(time_window_size), request_sender, waiters, @@ -234,12 +234,12 @@ fn find_latest_window_in_seconds<'a>( } pub(crate) struct TwcsCompactionTask { + pub engine_config: Arc<MitoConfig>, pub region_id: RegionId, pub metadata: RegionMetadataRef, pub sst_layer: AccessLayerRef, pub outputs: Vec<CompactionOutput>, pub expired_ssts: Vec<FileHandle>, - pub sst_write_buffer_size: ReadableSize, pub compaction_time_window: Option<i64>, pub file_purger: FilePurgerRef, /// Request sender to notify the worker. 
@@ -301,9 +301,20 @@ impl TwcsCompactionTask { ); let write_opts = WriteOptions { - write_buffer_size: self.sst_write_buffer_size, + write_buffer_size: self.engine_config.sst_write_buffer_size, ..Default::default() }; + let create_inverted_index = self + .engine_config + .inverted_index + .create_on_compaction + .auto(); + let mem_threshold_index_create = self + .engine_config + .inverted_index + .mem_threshold_on_create + .map(|m| m.as_bytes() as _); + let metadata = self.metadata.clone(); let sst_layer = self.sst_layer.clone(); let region_id = self.region_id; @@ -321,6 +332,8 @@ impl TwcsCompactionTask { source: Source::Reader(reader), cache_manager, storage, + create_inverted_index, + mem_threshold_index_create, }, &write_opts, ) diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs index 0723c702ae70..dce77a1233fd 100644 --- a/src/mito2/src/config.rs +++ b/src/mito2/src/config.rs @@ -18,10 +18,11 @@ use std::time::Duration; use common_base::readable_size::ReadableSize; use common_telemetry::warn; +use object_store::util::join_dir; use serde::{Deserialize, Serialize}; -use snafu::ensure; +use serde_with::{serde_as, NoneAsEmptyString}; -use crate::error::{InvalidConfigSnafu, Result}; +use crate::error::Result; /// Default max running background job. const DEFAULT_MAX_BG_JOB: usize = 4; @@ -72,7 +73,7 @@ pub struct MitoConfig { pub page_cache_size: ReadableSize, /// Whether to enable the experimental write cache. pub enable_experimental_write_cache: bool, - /// Path for write cache. + /// File system path for write cache, defaults to `{data_home}/write_cache`. pub experimental_write_cache_path: String, /// Capacity for write cache. pub experimental_write_cache_size: ReadableSize, @@ -89,6 +90,9 @@ pub struct MitoConfig { pub parallel_scan_channel_size: usize, /// Whether to allow stale entries read during replay. pub allow_stale_entries: bool, + + /// Inverted index configs. + pub inverted_index: InvertedIndexConfig, } impl Default for MitoConfig { @@ -113,6 +117,7 @@ impl Default for MitoConfig { scan_parallelism: divide_num_cpus(4), parallel_scan_channel_size: DEFAULT_SCAN_CHANNEL_SIZE, allow_stale_entries: false, + inverted_index: InvertedIndexConfig::default(), } } } @@ -121,7 +126,7 @@ impl MitoConfig { /// Sanitize incorrect configurations. /// /// Returns an error if there is a configuration that unable to sanitize. - pub(crate) fn sanitize(&mut self) -> Result<()> { + pub(crate) fn sanitize(&mut self, data_home: &str) -> Result<()> { // Use default value if `num_workers` is 0. if self.num_workers == 0 { self.num_workers = divide_num_cpus(2); @@ -167,13 +172,75 @@ impl MitoConfig { ); } - if self.enable_experimental_write_cache { - ensure!( - !self.experimental_write_cache_path.is_empty(), - InvalidConfigSnafu { - reason: "experimental_write_cache_path should not be empty", - } - ); + // Sets write cache path if it is empty. + if self.experimental_write_cache_path.is_empty() { + self.experimental_write_cache_path = join_dir(data_home, "write_cache"); + } + + self.inverted_index.sanitize(data_home)?; + + Ok(()) + } +} + +/// Operational mode for certain actions. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)] +#[serde(rename_all = "snake_case")] +pub enum Mode { + /// The action is performed automatically based on internal criteria. + #[default] + Auto, + /// The action is explicitly disabled. + Disable, +} + +impl Mode { + /// Whether the action is disabled. 
+ pub fn disabled(&self) -> bool { + matches!(self, Mode::Disable) + } + + /// Whether the action is automatic. + pub fn auto(&self) -> bool { + matches!(self, Mode::Auto) + } +} + +/// Configuration options for the inverted index. +#[serde_as] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +#[serde(default)] +pub struct InvertedIndexConfig { + /// Whether to create the index on flush: automatically or never. + pub create_on_flush: Mode, + /// Whether to create the index on compaction: automatically or never. + pub create_on_compaction: Mode, + /// Whether to apply the index on query: automatically or never. + pub apply_on_query: Mode, + /// Memory threshold for performing an external sort during index creation. + /// `None` means all sorting will happen in memory. + #[serde_as(as = "NoneAsEmptyString")] + pub mem_threshold_on_create: Option<ReadableSize>, + /// File system path to store intermediate files for external sort, defaults to `{data_home}/index_intermediate`. + pub intermediate_path: String, +} + +impl Default for InvertedIndexConfig { + fn default() -> Self { + Self { + create_on_flush: Mode::Auto, + create_on_compaction: Mode::Auto, + apply_on_query: Mode::Auto, + mem_threshold_on_create: Some(ReadableSize::mb(64)), + intermediate_path: String::new(), + } + } +} + +impl InvertedIndexConfig { + pub fn sanitize(&mut self, data_home: &str) -> Result<()> { + if self.intermediate_path.is_empty() { + self.intermediate_path = join_dir(data_home, "index_intermediate"); } Ok(()) diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs index 577d5131d0c1..02c7533c75c3 100644 --- a/src/mito2/src/engine.rs +++ b/src/mito2/src/engine.rs @@ -78,11 +78,12 @@ pub struct MitoEngine { impl MitoEngine { /// Returns a new [MitoEngine] with specific `config`, `log_store` and `object_store`. pub async fn new<S: LogStore>( + data_home: &str, mut config: MitoConfig, log_store: Arc<S>, object_store_manager: ObjectStoreManagerRef, ) -> Result<MitoEngine> { - config.sanitize()?; + config.sanitize(data_home)?; Ok(MitoEngine { inner: Arc::new(EngineInner::new(config, log_store, object_store_manager).await?), @@ -192,7 +193,8 @@ impl EngineInner { request, Some(cache_manager), ) - .with_parallelism(scan_parallelism); + .with_parallelism(scan_parallelism) + .ignore_inverted_index(self.config.inverted_index.apply_on_query.disabled()); scan_region.scanner() } @@ -315,13 +317,14 @@ impl RegionEngine for MitoEngine { impl MitoEngine { /// Returns a new [MitoEngine] for tests. 
pub async fn new_for_test<S: LogStore>( + data_home: &str, mut config: MitoConfig, log_store: Arc<S>, object_store_manager: ObjectStoreManagerRef, write_buffer_manager: Option<crate::flush::WriteBufferManagerRef>, listener: Option<crate::engine::listener::EventListenerRef>, ) -> Result<MitoEngine> { - config.sanitize()?; + config.sanitize(data_home)?; let config = Arc::new(config); Ok(MitoEngine { diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs index 2dff27290ca8..47aed723cf33 100644 --- a/src/mito2/src/engine/basic_test.rs +++ b/src/mito2/src/engine/basic_test.rs @@ -550,8 +550,8 @@ async fn test_region_usage() { let region_stat = region.region_usage().await; assert_eq!(region_stat.wal_usage, 0); - assert_eq!(region_stat.sst_usage, 2742); + assert_eq!(region_stat.sst_usage, 3006); // region total usage - assert_eq!(region_stat.disk_usage(), 3791); + assert_eq!(region_stat.disk_usage(), 4072); } diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs index f79d811ba473..6ee585e40c2d 100644 --- a/src/mito2/src/flush.rs +++ b/src/mito2/src/flush.rs @@ -315,6 +315,12 @@ impl RegionFlushTask { let file_id = FileId::random(); let iter = mem.iter(None, None); let source = Source::Iter(iter); + let create_inverted_index = self.engine_config.inverted_index.create_on_flush.auto(); + let mem_threshold_index_create = self + .engine_config + .inverted_index + .mem_threshold_on_create + .map(|m| m.as_bytes() as _); // Flush to level 0. let write_request = SstWriteRequest { @@ -323,6 +329,8 @@ impl RegionFlushTask { source, cache_manager: self.cache_manager.clone(), storage: version.options.storage.clone(), + create_inverted_index, + mem_threshold_index_create, }; let Some(sst_info) = self .access_layer @@ -732,7 +740,7 @@ mod tests { #[tokio::test] async fn test_schedule_empty() { - let env = SchedulerEnv::new(); + let env = SchedulerEnv::new().await; let (tx, _rx) = mpsc::channel(4); let mut scheduler = env.mock_flush_scheduler(); let builder = VersionControlBuilder::new(); diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs index c2b0939d9b2a..d5f7dbe10023 100644 --- a/src/mito2/src/read/scan_region.rs +++ b/src/mito2/src/read/scan_region.rs @@ -122,6 +122,8 @@ pub(crate) struct ScanRegion { cache_manager: Option<CacheManagerRef>, /// Parallelism to scan. parallelism: ScanParallism, + /// Whether to ignore inverted index. + ignore_inverted_index: bool, } impl ScanRegion { @@ -138,6 +140,7 @@ impl ScanRegion { request, cache_manager, parallelism: ScanParallism::default(), + ignore_inverted_index: false, } } @@ -148,6 +151,12 @@ impl ScanRegion { self } + #[must_use] + pub(crate) fn ignore_inverted_index(mut self, ignore: bool) -> Self { + self.ignore_inverted_index = ignore; + self + } + /// Returns a [Scanner] to scan the region. pub(crate) fn scanner(self) -> Result<Scanner> { self.seq_scan().map(Scanner::Seq) @@ -234,6 +243,10 @@ impl ScanRegion { /// Use the latest schema to build the index applier. 
fn build_index_applier(&self) -> Option<SstIndexApplierRef> { + if self.ignore_inverted_index { + return None; + } + let file_cache = || -> Option<FileCacheRef> { let cache_manager = self.cache_manager.as_ref()?; let write_cache = cache_manager.write_cache()?; diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs index 80116ea9fdd8..9e53f52b8bbb 100644 --- a/src/mito2/src/region/opener.rs +++ b/src/mito2/src/region/opener.rs @@ -45,6 +45,7 @@ use crate::region_write_ctx::RegionWriteCtx; use crate::request::OptionOutputTx; use crate::schedule::scheduler::SchedulerRef; use crate::sst::file_purger::LocalFilePurger; +use crate::sst::index::intermediate::IntermediateManager; use crate::wal::{EntryId, Wal}; /// Builder to create a new [MitoRegion] or open an existing one. @@ -58,6 +59,7 @@ pub(crate) struct RegionOpener { options: Option<RegionOptions>, cache_manager: Option<CacheManagerRef>, skip_wal_replay: bool, + intermediate_manager: IntermediateManager, } impl RegionOpener { @@ -68,6 +70,7 @@ impl RegionOpener { memtable_builder: MemtableBuilderRef, object_store_manager: ObjectStoreManagerRef, scheduler: SchedulerRef, + intermediate_manager: IntermediateManager, ) -> RegionOpener { RegionOpener { region_id, @@ -79,6 +82,7 @@ impl RegionOpener { options: None, cache_manager: None, skip_wal_replay: false, + intermediate_manager, } } @@ -170,7 +174,11 @@ impl RegionOpener { .options(options) .build(); let version_control = Arc::new(VersionControl::new(version)); - let access_layer = Arc::new(AccessLayer::new(self.region_dir, object_store)); + let access_layer = Arc::new(AccessLayer::new( + self.region_dir, + object_store, + self.intermediate_manager, + )); Ok(MitoRegion { region_id, @@ -240,7 +248,11 @@ impl RegionOpener { let region_id = self.region_id; let object_store = self.object_store(&region_options.storage)?.clone(); - let access_layer = Arc::new(AccessLayer::new(self.region_dir.clone(), object_store)); + let access_layer = Arc::new(AccessLayer::new( + self.region_dir.clone(), + object_store, + self.intermediate_manager.clone(), + )); let file_purger = Arc::new(LocalFilePurger::new( self.scheduler.clone(), access_layer.clone(), diff --git a/src/mito2/src/sst/file_purger.rs b/src/mito2/src/sst/file_purger.rs index cc913c1a7e22..623e5695aad5 100644 --- a/src/mito2/src/sst/file_purger.rs +++ b/src/mito2/src/sst/file_purger.rs @@ -97,6 +97,7 @@ impl FilePurger for LocalFilePurger { mod tests { use common_test_util::temp_dir::create_temp_dir; use object_store::services::Fs; + use object_store::util::join_dir; use object_store::ObjectStore; use smallvec::SmallVec; @@ -104,6 +105,7 @@ mod tests { use crate::access_layer::AccessLayer; use crate::schedule::scheduler::{LocalScheduler, Scheduler}; use crate::sst::file::{FileHandle, FileId, FileMeta, FileTimeRange, IndexType}; + use crate::sst::index::intermediate::IntermediateManager; use crate::sst::location; #[tokio::test] @@ -111,17 +113,21 @@ mod tests { common_telemetry::init_default_ut_logging(); let dir = create_temp_dir("file-purge"); + let dir_path = dir.path().display().to_string(); let mut builder = Fs::default(); - builder.root(dir.path().to_str().unwrap()); - let object_store = ObjectStore::new(builder).unwrap().finish(); + builder.root(&dir_path); let sst_file_id = FileId::random(); let sst_dir = "table1"; let path = location::sst_file_path(sst_dir, sst_file_id); + let intm_mgr = IntermediateManager::init_fs(join_dir(&dir_path, "intm")) + .await + .unwrap(); + let object_store = 
ObjectStore::new(builder).unwrap().finish(); object_store.write(&path, vec![0; 4096]).await.unwrap(); let scheduler = Arc::new(LocalScheduler::new(3)); - let layer = Arc::new(AccessLayer::new(sst_dir, object_store.clone())); + let layer = Arc::new(AccessLayer::new(sst_dir, object_store.clone(), intm_mgr)); let file_purger = Arc::new(LocalFilePurger::new(scheduler.clone(), layer, None)); @@ -152,13 +158,17 @@ mod tests { common_telemetry::init_default_ut_logging(); let dir = create_temp_dir("file-purge"); + let dir_path = dir.path().display().to_string(); let mut builder = Fs::default(); - builder.root(dir.path().to_str().unwrap()); - let object_store = ObjectStore::new(builder).unwrap().finish(); + builder.root(&dir_path); let sst_file_id = FileId::random(); let sst_dir = "table1"; - + let intm_mgr = IntermediateManager::init_fs(join_dir(&dir_path, "intm")) + .await + .unwrap(); let path = location::sst_file_path(sst_dir, sst_file_id); + + let object_store = ObjectStore::new(builder).unwrap().finish(); object_store.write(&path, vec![0; 4096]).await.unwrap(); let index_path = location::index_file_path(sst_dir, sst_file_id); @@ -168,7 +178,7 @@ mod tests { .unwrap(); let scheduler = Arc::new(LocalScheduler::new(3)); - let layer = Arc::new(AccessLayer::new(sst_dir, object_store.clone())); + let layer = Arc::new(AccessLayer::new(sst_dir, object_store.clone(), intm_mgr)); let file_purger = Arc::new(LocalFilePurger::new(scheduler.clone(), layer, None)); diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs index 7e6cefa9929b..3e18cabe62a9 100644 --- a/src/mito2/src/sst/index.rs +++ b/src/mito2/src/sst/index.rs @@ -12,13 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![allow(dead_code)] - -pub mod applier; +pub(crate) mod applier; mod codec; -pub mod creator; +pub(crate) mod creator; +pub(crate) mod intermediate; mod store; +use std::num::NonZeroUsize; + +use common_telemetry::{debug, warn}; +use creator::SstIndexCreator; +use object_store::ObjectStore; +use store_api::metadata::RegionMetadataRef; +use store_api::storage::RegionId; + +use crate::read::Batch; +use crate::sst::file::FileId; +use crate::sst::index::intermediate::IntermediateManager; + const INDEX_BLOB_TYPE: &str = "greptime-inverted-index-v1"; // TODO(zhongzc): how to determine this value? @@ -27,3 +38,267 @@ const MIN_MEMORY_USAGE_THRESHOLD: usize = 8192; /// The buffer size for the pipe used to send index data to the puffin blob. const PIPE_BUFFER_SIZE_FOR_SENDING_BLOB: usize = 8192; + +/// The index creator that hides the error handling details. +#[derive(Default)] +pub struct Indexer { + file_id: FileId, + region_id: RegionId, + inner: Option<SstIndexCreator>, +} + +impl Indexer { + /// Update the index with the given batch. + pub async fn update(&mut self, batch: &Batch) { + if let Some(creator) = self.inner.as_mut() { + if let Err(err) = creator.update(batch).await { + warn!( + err; "Failed to update index, skip creating index, region_id: {}, file_id: {}", + self.region_id, self.file_id, + ); + + // Skip index creation if error occurs. + self.inner = None; + } + } + } + + /// Finish the index creation. + /// Returns the number of bytes written if success or None if failed. 
+ pub async fn finish(&mut self) -> Option<usize> { + if let Some(mut creator) = self.inner.take() { + match creator.finish().await { + Ok((row_count, byte_count)) => { + debug!( + "Create index successfully, region_id: {}, file_id: {}, bytes: {}, rows: {}", + self.region_id, self.file_id, byte_count, row_count + ); + return Some(byte_count); + } + Err(err) => { + warn!( + err; "Failed to create index, region_id: {}, file_id: {}", + self.region_id, self.file_id, + ); + } + } + } + + None + } + + /// Abort the index creation. + pub async fn abort(&mut self) { + if let Some(mut creator) = self.inner.take() { + if let Err(err) = creator.abort().await { + warn!( + err; "Failed to abort index, region_id: {}, file_id: {}", + self.region_id, self.file_id, + ); + } + } + } +} + +pub(crate) struct IndexerBuilder<'a> { + pub(crate) create_inverted_index: bool, + pub(crate) mem_threshold_index_create: Option<usize>, + pub(crate) file_id: FileId, + pub(crate) file_path: String, + pub(crate) metadata: &'a RegionMetadataRef, + pub(crate) row_group_size: usize, + pub(crate) object_store: ObjectStore, + pub(crate) intermediate_manager: IntermediateManager, +} + +impl<'a> IndexerBuilder<'a> { + /// Sanity check for arguments and create a new [Indexer] + /// with inner [SstIndexCreator] if arguments are valid. + pub(crate) fn build(self) -> Indexer { + if !self.create_inverted_index { + debug!( + "Skip creating index due to request, region_id: {}, file_id: {}", + self.metadata.region_id, self.file_id, + ); + return Indexer::default(); + } + + if self.metadata.primary_key.is_empty() { + debug!( + "No tag columns, skip creating index, region_id: {}, file_id: {}", + self.metadata.region_id, self.file_id, + ); + return Indexer::default(); + } + + let Some(row_group_size) = NonZeroUsize::new(self.row_group_size) else { + warn!( + "Row group size is 0, skip creating index, region_id: {}, file_id: {}", + self.metadata.region_id, self.file_id, + ); + return Indexer::default(); + }; + + let creator = SstIndexCreator::new( + self.file_path, + self.file_id, + self.metadata, + self.object_store, + self.intermediate_manager, + self.mem_threshold_index_create, + row_group_size, + ); + + Indexer { + file_id: self.file_id, + region_id: self.metadata.region_id, + inner: Some(creator), + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use api::v1::SemanticType; + use datatypes::data_type::ConcreteDataType; + use datatypes::schema::ColumnSchema; + use object_store::services::Memory; + use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; + + use super::*; + + fn mock_region_metadata() -> RegionMetadataRef { + let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 2)); + builder + .push_column_metadata(ColumnMetadata { + column_schema: ColumnSchema::new("a", ConcreteDataType::int64_datatype(), false), + semantic_type: SemanticType::Tag, + column_id: 1, + }) + .push_column_metadata(ColumnMetadata { + column_schema: ColumnSchema::new("b", ConcreteDataType::float64_datatype(), false), + semantic_type: SemanticType::Field, + column_id: 2, + }) + .push_column_metadata(ColumnMetadata { + column_schema: ColumnSchema::new( + "c", + ConcreteDataType::timestamp_millisecond_datatype(), + false, + ), + semantic_type: SemanticType::Timestamp, + column_id: 3, + }) + .primary_key(vec![1]); + + Arc::new(builder.build().unwrap()) + } + + fn no_tag_region_metadata() -> RegionMetadataRef { + let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 2)); + builder + .push_column_metadata(ColumnMetadata { + 
column_schema: ColumnSchema::new("a", ConcreteDataType::int64_datatype(), false), + semantic_type: SemanticType::Field, + column_id: 1, + }) + .push_column_metadata(ColumnMetadata { + column_schema: ColumnSchema::new("b", ConcreteDataType::float64_datatype(), false), + semantic_type: SemanticType::Field, + column_id: 2, + }) + .push_column_metadata(ColumnMetadata { + column_schema: ColumnSchema::new( + "c", + ConcreteDataType::timestamp_millisecond_datatype(), + false, + ), + semantic_type: SemanticType::Timestamp, + column_id: 3, + }); + + Arc::new(builder.build().unwrap()) + } + + fn mock_object_store() -> ObjectStore { + ObjectStore::new(Memory::default()).unwrap().finish() + } + + fn mock_intm_mgr() -> IntermediateManager { + IntermediateManager::new(mock_object_store()) + } + + #[test] + fn test_build_indexer_basic() { + let metadata = mock_region_metadata(); + let indexer = IndexerBuilder { + create_inverted_index: true, + mem_threshold_index_create: Some(1024), + file_id: FileId::random(), + file_path: "test".to_string(), + metadata: &metadata, + row_group_size: 1024, + object_store: mock_object_store(), + intermediate_manager: mock_intm_mgr(), + } + .build(); + + assert!(indexer.inner.is_some()); + } + + #[test] + fn test_build_indexer_disable_create() { + let metadata = mock_region_metadata(); + let indexer = IndexerBuilder { + create_inverted_index: false, + mem_threshold_index_create: Some(1024), + file_id: FileId::random(), + file_path: "test".to_string(), + metadata: &metadata, + row_group_size: 1024, + object_store: mock_object_store(), + intermediate_manager: mock_intm_mgr(), + } + .build(); + + assert!(indexer.inner.is_none()); + } + + #[test] + fn test_build_indexer_no_tag() { + let metadata = no_tag_region_metadata(); + let indexer = IndexerBuilder { + create_inverted_index: true, + mem_threshold_index_create: Some(1024), + file_id: FileId::random(), + file_path: "test".to_string(), + metadata: &metadata, + row_group_size: 1024, + object_store: mock_object_store(), + intermediate_manager: mock_intm_mgr(), + } + .build(); + + assert!(indexer.inner.is_none()); + } + + #[test] + fn test_build_indexer_zero_row_group() { + let metadata = mock_region_metadata(); + let indexer = IndexerBuilder { + create_inverted_index: true, + mem_threshold_index_create: Some(1024), + file_id: FileId::random(), + file_path: "test".to_string(), + metadata: &metadata, + row_group_size: 0, + object_store: mock_object_store(), + intermediate_manager: mock_intm_mgr(), + } + .build(); + + assert!(indexer.inner.is_none()); + } +} diff --git a/src/mito2/src/sst/index/creator.rs b/src/mito2/src/sst/index/creator.rs index b88b47c1394f..c143bc9aacd3 100644 --- a/src/mito2/src/sst/index/creator.rs +++ b/src/mito2/src/sst/index/creator.rs @@ -43,22 +43,19 @@ use crate::sst::file::FileId; use crate::sst::index::codec::{IndexValueCodec, IndexValuesCodec}; use crate::sst::index::creator::statistics::Statistics; use crate::sst::index::creator::temp_provider::TempFileProvider; +use crate::sst::index::intermediate::{IntermediateLocation, IntermediateManager}; use crate::sst::index::store::InstrumentedStore; use crate::sst::index::{ INDEX_BLOB_TYPE, MIN_MEMORY_USAGE_THRESHOLD, PIPE_BUFFER_SIZE_FOR_SENDING_BLOB, }; -use crate::sst::location::{self, IntermediateLocation}; type ByteCount = usize; type RowCount = usize; /// Creates SST index. pub struct SstIndexCreator { - /// Directory of the region. - region_dir: String, - /// ID of the SST file. - sst_file_id: FileId, - + /// Path of index file to write. 
+ file_path: String, /// The store to write index files. store: InstrumentedStore, /// The index creator. @@ -81,11 +78,11 @@ impl SstIndexCreator { /// Creates a new `SstIndexCreator`. /// Should ensure that the number of tag columns is greater than 0. pub fn new( - region_dir: String, + file_path: String, sst_file_id: FileId, metadata: &RegionMetadataRef, index_store: ObjectStore, - intermediate_store: ObjectStore, // prefer to use local store + intermediate_manager: IntermediateManager, memory_usage_threshold: Option<usize>, row_group_size: NonZeroUsize, ) -> Self { @@ -95,16 +92,15 @@ impl SstIndexCreator { (threshold / metadata.primary_key.len()).max(MIN_MEMORY_USAGE_THRESHOLD) }); let temp_file_provider = Arc::new(TempFileProvider::new( - IntermediateLocation::new(&region_dir, &sst_file_id), - InstrumentedStore::new(intermediate_store), + IntermediateLocation::new(&metadata.region_id, &sst_file_id), + intermediate_manager, )); let sorter = ExternalSorter::factory(temp_file_provider.clone() as _, memory_threshold); let index_creator = Box::new(SortIndexCreator::new(sorter, row_group_size)); let codec = IndexValuesCodec::from_tag_columns(metadata.primary_key_columns()); Self { - region_dir, - sst_file_id, + file_path, store: InstrumentedStore::new(index_store), codec, index_creator, @@ -129,10 +125,7 @@ impl SstIndexCreator { if let Err(update_err) = self.do_update(batch).await { // clean up garbage if failed to update if let Err(err) = self.do_cleanup().await { - warn!( - err; "Failed to clean up index creator, region_dir: {}, sst_file_id: {}", - self.region_dir, self.sst_file_id, - ); + warn!(err; "Failed to clean up index creator, file_path: {}", self.file_path); } return Err(update_err); } @@ -153,10 +146,7 @@ impl SstIndexCreator { let finish_res = self.do_finish().await; // clean up garbage no matter finish successfully or not if let Err(err) = self.do_cleanup().await { - warn!( - err; "Failed to clean up index creator, region_dir: {}, sst_file_id: {}", - self.region_dir, self.sst_file_id, - ); + warn!(err; "Failed to clean up index creator, file_path: {}", self.file_path); } finish_res.map(|_| (self.stats.row_count(), self.stats.byte_count())) @@ -216,11 +206,10 @@ impl SstIndexCreator { async fn do_finish(&mut self) -> Result<()> { let mut guard = self.stats.record_finish(); - let file_path = location::index_file_path(&self.region_dir, self.sst_file_id); let file_writer = self .store .writer( - &file_path, + &self.file_path, &INDEX_PUFFIN_WRITE_BYTES_TOTAL, &INDEX_PUFFIN_WRITE_OP_TOTAL, &INDEX_PUFFIN_FLUSH_OP_TOTAL, diff --git a/src/mito2/src/sst/index/creator/temp_provider.rs b/src/mito2/src/sst/index/creator/temp_provider.rs index d8dfff3d7d20..d938b236c868 100644 --- a/src/mito2/src/sst/index/creator/temp_provider.rs +++ b/src/mito2/src/sst/index/creator/temp_provider.rs @@ -27,16 +27,15 @@ use crate::metrics::{ INDEX_INTERMEDIATE_READ_OP_TOTAL, INDEX_INTERMEDIATE_SEEK_OP_TOTAL, INDEX_INTERMEDIATE_WRITE_BYTES_TOTAL, INDEX_INTERMEDIATE_WRITE_OP_TOTAL, }; -use crate::sst::index::store::InstrumentedStore; -use crate::sst::location::IntermediateLocation; +use crate::sst::index::intermediate::{IntermediateLocation, IntermediateManager}; /// `TempFileProvider` implements `ExternalTempFileProvider`. /// It uses `InstrumentedStore` to create and read intermediate files. pub(crate) struct TempFileProvider { /// Provides the location of intermediate files. location: IntermediateLocation, - /// Provides access to files in the object store. 
- store: InstrumentedStore, + /// Provides store to access to intermediate files. + manager: IntermediateManager, } #[async_trait] @@ -48,7 +47,8 @@ impl ExternalTempFileProvider for TempFileProvider { ) -> IndexResult<Box<dyn AsyncWrite + Unpin + Send>> { let path = self.location.file_path(column_id, file_id); let writer = self - .store + .manager + .store() .writer( &path, &INDEX_INTERMEDIATE_WRITE_BYTES_TOTAL, @@ -67,7 +67,8 @@ impl ExternalTempFileProvider for TempFileProvider { ) -> IndexResult<Vec<Box<dyn AsyncRead + Unpin + Send>>> { let column_path = self.location.column_path(column_id); let entries = self - .store + .manager + .store() .list(&column_path) .await .map_err(BoxedError::new) @@ -81,7 +82,8 @@ impl ExternalTempFileProvider for TempFileProvider { } let reader = self - .store + .manager + .store() .reader( entry.path(), &INDEX_INTERMEDIATE_READ_BYTES_TOTAL, @@ -100,30 +102,35 @@ impl ExternalTempFileProvider for TempFileProvider { impl TempFileProvider { /// Creates a new `TempFileProvider`. - pub fn new(location: IntermediateLocation, store: InstrumentedStore) -> Self { - Self { location, store } + pub fn new(location: IntermediateLocation, manager: IntermediateManager) -> Self { + Self { location, manager } } /// Removes all intermediate files. pub async fn cleanup(&self) -> Result<()> { - self.store.remove_all(self.location.root_path()).await + self.manager + .store() + .remove_all(self.location.root_path()) + .await } } #[cfg(test)] mod tests { + use common_test_util::temp_dir; use futures::{AsyncReadExt, AsyncWriteExt}; - use object_store::services::Memory; - use object_store::ObjectStore; + use store_api::storage::RegionId; use super::*; use crate::sst::file::FileId; #[tokio::test] async fn test_temp_file_provider_basic() { - let location = IntermediateLocation::new("region_dir", &FileId::random()); - let object_store = ObjectStore::new(Memory::default()).unwrap().finish(); - let store = InstrumentedStore::new(object_store); + let temp_dir = temp_dir::create_temp_dir("intermediate"); + let path = temp_dir.path().display().to_string(); + + let location = IntermediateLocation::new(&RegionId::new(0, 0), &FileId::random()); + let store = IntermediateManager::init_fs(path).await.unwrap(); let provider = TempFileProvider::new(location.clone(), store); let column_name = "tag0"; @@ -163,7 +170,8 @@ mod tests { provider.cleanup().await.unwrap(); assert!(provider - .store + .manager + .store() .list(location.root_path()) .await .unwrap() diff --git a/src/mito2/src/sst/index/intermediate.rs b/src/mito2/src/sst/index/intermediate.rs new file mode 100644 index 000000000000..ab10b0e68032 --- /dev/null +++ b/src/mito2/src/sst/index/intermediate.rs @@ -0,0 +1,153 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use common_telemetry::warn; +use object_store::util::{self, normalize_dir}; +use store_api::storage::RegionId; +use uuid::Uuid; + +use crate::access_layer::new_fs_object_store; +use crate::error::Result; +use crate::sst::file::FileId; +use crate::sst::index::store::InstrumentedStore; + +const INTERMEDIATE_DIR: &str = "__intm"; + +/// `IntermediateManager` provides store to access to intermediate files. +#[derive(Clone)] +pub struct IntermediateManager { + store: InstrumentedStore, +} + +impl IntermediateManager { + /// Create a new `IntermediateManager` with the given root path. + /// It will clean up all garbage intermediate files from previous runs. + pub async fn init_fs(root_path: impl AsRef<str>) -> Result<Self> { + let store = new_fs_object_store(&normalize_dir(root_path.as_ref())).await?; + let store = InstrumentedStore::new(store); + + // Remove all garbage intermediate files from previous runs. + if let Err(err) = store.remove_all(INTERMEDIATE_DIR).await { + warn!(err; "Failed to remove garbage intermediate files"); + } + + Ok(Self { store }) + } + + /// Returns the store to access to intermediate files. + pub(crate) fn store(&self) -> &InstrumentedStore { + &self.store + } + + #[cfg(test)] + pub(crate) fn new(store: object_store::ObjectStore) -> Self { + Self { + store: InstrumentedStore::new(store), + } + } +} + +/// `IntermediateLocation` produces paths for intermediate files +/// during external sorting. +#[derive(Debug, Clone)] +pub struct IntermediateLocation { + root_path: String, +} + +impl IntermediateLocation { + /// Create a new `IntermediateLocation`. Set the root directory to + /// `__intm/{region_id}/{sst_file_id}/{uuid}/`, incorporating + /// uuid to differentiate active sorting files from orphaned data due to unexpected + /// process termination. 
+ pub fn new(region_id: &RegionId, sst_file_id: &FileId) -> Self { + let region_id = region_id.as_u64(); + let uuid = Uuid::new_v4(); + Self { + root_path: format!("{INTERMEDIATE_DIR}/{region_id}/{sst_file_id}/{uuid}/"), + } + } + + /// Returns the root directory of the intermediate files + pub fn root_path(&self) -> &str { + &self.root_path + } + + /// Returns the path of the directory for intermediate files associated with a column: + /// `__intm/{region_id}/{sst_file_id}/{uuid}/{column_id}/` + pub fn column_path(&self, column_id: &str) -> String { + util::join_path(&self.root_path, &format!("{column_id}/")) + } + + /// Returns the path of the intermediate file with the given id for a column: + /// `__intm/{region_id}/{sst_file_id}/{uuid}/{column_id}/{im_file_id}.im` + pub fn file_path(&self, column_id: &str, im_file_id: &str) -> String { + util::join_path(&self.column_path(column_id), &format!("{im_file_id}.im")) + } +} + +#[cfg(test)] +mod tests { + use common_test_util::temp_dir; + use regex::Regex; + + use super::*; + + #[tokio::test] + async fn test_manager() { + let temp_dir = temp_dir::create_temp_dir("index_intermediate"); + let path = temp_dir.path().to_str().unwrap(); + + // write a garbage file + tokio::fs::create_dir_all(format!("{path}/{INTERMEDIATE_DIR}")) + .await + .unwrap(); + tokio::fs::write(format!("{path}/{INTERMEDIATE_DIR}/garbage.im"), "blahblah") + .await + .unwrap(); + + let _manager = IntermediateManager::init_fs(path).await.unwrap(); + + // cleaned up by `init_fs` + assert!(!tokio::fs::try_exists(format!("{path}/{INTERMEDIATE_DIR}")) + .await + .unwrap()); + } + + #[test] + fn test_intermediate_location() { + let sst_file_id = FileId::random(); + let location = IntermediateLocation::new(&RegionId::new(0, 0), &sst_file_id); + + let re = Regex::new(&format!( + "{INTERMEDIATE_DIR}/0/{sst_file_id}/{}/", + r"\w{8}-\w{4}-\w{4}-\w{4}-\w{12}" + )) + .unwrap(); + assert!(re.is_match(location.root_path())); + + let uuid = location.root_path().split('/').nth(3).unwrap(); + + let column_id = "1"; + assert_eq!( + location.column_path(column_id), + format!("{INTERMEDIATE_DIR}/0/{sst_file_id}/{uuid}/{column_id}/") + ); + + let im_file_id = "000000000010"; + assert_eq!( + location.file_path(column_id, im_file_id), + format!("{INTERMEDIATE_DIR}/0/{sst_file_id}/{uuid}/{column_id}/{im_file_id}.im") + ); + } +} diff --git a/src/mito2/src/sst/location.rs b/src/mito2/src/sst/location.rs index d3b69d9c73d8..179e9159c94c 100644 --- a/src/mito2/src/sst/location.rs +++ b/src/mito2/src/sst/location.rs @@ -13,7 +13,6 @@ // limitations under the License. use object_store::util; -use uuid::Uuid; use crate::sst::file::FileId; @@ -30,48 +29,8 @@ pub fn index_file_path(region_dir: &str, sst_file_id: FileId) -> String { util::join_path(&dir, &sst_file_id.as_puffin()) } -/// `IntermediateLocation` produces paths for intermediate files -/// during external sorting. -#[derive(Debug, Clone)] -pub struct IntermediateLocation { - root_path: String, -} - -impl IntermediateLocation { - /// Create a new `IntermediateLocation`. Set the root directory to - /// `{region_dir}/index/__intermediate/{sst_file_id}/{uuid}/`, incorporating - /// uuid to differentiate active sorting files from orphaned data due to unexpected - /// process termination. 
- pub fn new(region_dir: &str, sst_file_id: &FileId) -> Self { - let uuid = Uuid::new_v4(); - let child = format!("index/__intermediate/{sst_file_id}/{uuid}/"); - Self { - root_path: util::join_path(region_dir, &child), - } - } - - /// Returns the root directory of the intermediate files - pub fn root_path(&self) -> &str { - &self.root_path - } - - /// Returns the path of the directory for intermediate files associated with a column: - /// `{region_dir}/index/__intermediate/{sst_file_id}/{uuid}/{column_id}/` - pub fn column_path(&self, column_id: &str) -> String { - util::join_path(&self.root_path, &format!("{column_id}/")) - } - - /// Returns the path of the intermediate file with the given id for a column: - /// `{region_dir}/index/__intermediate/{sst_file_id}/{uuid}/{column_id}/{im_file_id}.im` - pub fn file_path(&self, column_id: &str, im_file_id: &str) -> String { - util::join_path(&self.column_path(column_id), &format!("{im_file_id}.im")) - } -} - #[cfg(test)] mod tests { - use regex::Regex; - use super::*; #[test] @@ -91,33 +50,4 @@ mod tests { format!("region_dir/index/{file_id}.puffin") ); } - - #[test] - fn test_intermediate_location() { - let sst_file_id = FileId::random(); - let location = IntermediateLocation::new("region_dir", &sst_file_id); - - let re = Regex::new(&format!( - "region_dir/index/__intermediate/{sst_file_id}/{}/", - r"\w{8}-\w{4}-\w{4}-\w{4}-\w{12}" - )) - .unwrap(); - assert!(re.is_match(location.root_path())); - - let uuid = location.root_path().split('/').nth(4).unwrap(); - - let column_id = "1"; - assert_eq!( - location.column_path(column_id), - format!("region_dir/index/__intermediate/{sst_file_id}/{uuid}/{column_id}/") - ); - - let im_file_id = "000000000010"; - assert_eq!( - location.file_path(column_id, im_file_id), - format!( - "region_dir/index/__intermediate/{sst_file_id}/{uuid}/{column_id}/{im_file_id}.im" - ) - ); - } } diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs index 6c9ec3e9802f..924df76e1dd6 100644 --- a/src/mito2/src/sst/parquet.rs +++ b/src/mito2/src/sst/parquet.rs @@ -80,6 +80,7 @@ mod tests { use super::*; use crate::cache::{CacheManager, PageKey}; + use crate::sst::index::Indexer; use crate::sst::parquet::reader::ParquetReaderBuilder; use crate::sst::parquet::writer::ParquetWriter; use crate::test_util::sst_util::{ @@ -107,7 +108,12 @@ mod tests { ..Default::default() }; - let mut writer = ParquetWriter::new(file_path, metadata, object_store.clone()); + let mut writer = ParquetWriter::new( + file_path, + metadata, + object_store.clone(), + Indexer::default(), + ); let info = writer .write_all(source, &write_opts) .await @@ -156,7 +162,12 @@ mod tests { ..Default::default() }; // Prepare data. 
- let mut writer = ParquetWriter::new(file_path, metadata.clone(), object_store.clone()); + let mut writer = ParquetWriter::new( + file_path, + metadata.clone(), + object_store.clone(), + Indexer::default(), + ); writer .write_all(source, &write_opts) .await @@ -225,7 +236,12 @@ mod tests { // write the sst file and get sst info // sst info contains the parquet metadata, which is converted from FileMetaData - let mut writer = ParquetWriter::new(file_path, metadata.clone(), object_store.clone()); + let mut writer = ParquetWriter::new( + file_path, + metadata.clone(), + object_store.clone(), + Indexer::default(), + ); let sst_info = writer .write_all(source, &write_opts) .await diff --git a/src/mito2/src/sst/parquet/writer.rs b/src/mito2/src/sst/parquet/writer.rs index e1d8765f5f45..d2d9cc2d492c 100644 --- a/src/mito2/src/sst/parquet/writer.rs +++ b/src/mito2/src/sst/parquet/writer.rs @@ -19,6 +19,7 @@ use std::sync::Arc; use common_datasource::file_format::parquet::BufferedWriter; use common_telemetry::debug; use common_time::Timestamp; +use futures::TryFutureExt; use object_store::ObjectStore; use parquet::basic::{Compression, Encoding, ZstdLevel}; use parquet::file::metadata::KeyValue; @@ -28,10 +29,11 @@ use snafu::ResultExt; use store_api::metadata::RegionMetadataRef; use store_api::storage::consts::SEQUENCE_COLUMN_NAME; -use super::helper::parse_parquet_metadata; use crate::error::{InvalidMetadataSnafu, Result, WriteBufferSnafu}; use crate::read::{Batch, Source}; +use crate::sst::index::Indexer; use crate::sst::parquet::format::WriteFormat; +use crate::sst::parquet::helper::parse_parquet_metadata; use crate::sst::parquet::{SstInfo, WriteOptions, PARQUET_METADATA_KEY}; /// Parquet SST writer. @@ -41,6 +43,7 @@ pub struct ParquetWriter { /// Region metadata of the source and the target SST. metadata: RegionMetadataRef, object_store: ObjectStore, + indexer: Indexer, } impl ParquetWriter { @@ -49,11 +52,13 @@ impl ParquetWriter { file_path: String, metadata: RegionMetadataRef, object_store: ObjectStore, + indexer: Indexer, ) -> ParquetWriter { ParquetWriter { file_path, metadata, object_store, + indexer, } } @@ -90,16 +95,22 @@ impl ParquetWriter { .context(WriteBufferSnafu)?; let mut stats = SourceStats::default(); - while let Some(batch) = source.next_batch().await? { + while let Some(batch) = write_next_batch(&mut source, &write_format, &mut buffered_writer) + .or_else(|err| async { + // abort index creation if error occurs. + self.indexer.abort().await; + Err(err) + }) + .await? + { stats.update(&batch); - let arrow_batch = write_format.convert_batch(&batch)?; - - buffered_writer - .write(&arrow_batch) - .await - .context(WriteBufferSnafu)?; + self.indexer.update(&batch).await; } + let index_size = self.indexer.finish().await; + let inverted_index_available = index_size.is_some(); + let index_file_size = index_size.unwrap_or(0) as u64; + if stats.num_rows == 0 { debug!( "No data written, try to stop the writer: {}", @@ -124,8 +135,8 @@ impl ParquetWriter { file_size, num_rows: stats.num_rows, file_metadata: Some(Arc::new(parquet_metadata)), - inverted_index_available: false, - index_file_size: 0, + inverted_index_available, + index_file_size, })) } @@ -149,6 +160,24 @@ impl ParquetWriter { } } +async fn write_next_batch( + source: &mut Source, + write_format: &WriteFormat, + buffered_writer: &mut BufferedWriter, +) -> Result<Option<Batch>> { + let Some(batch) = source.next_batch().await? 
else { + return Ok(None); + }; + + let arrow_batch = write_format.convert_batch(&batch)?; + buffered_writer + .write(&arrow_batch) + .await + .context(WriteBufferSnafu)?; + + Ok(Some(batch)) +} + #[derive(Default)] struct SourceStats { /// Number of rows fetched. diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs index 73795744ffc0..63d73c776e27 100644 --- a/src/mito2/src/test_util.rs +++ b/src/mito2/src/test_util.rs @@ -136,7 +136,8 @@ impl TestEnv { let object_store_manager = Arc::new(object_store_manager); self.logstore = Some(logstore.clone()); self.object_store_manager = Some(object_store_manager.clone()); - MitoEngine::new(config, logstore, object_store_manager) + let data_home = self.data_home().display().to_string(); + MitoEngine::new(&data_home, config, logstore, object_store_manager) .await .unwrap() } @@ -145,8 +146,8 @@ impl TestEnv { pub async fn create_follower_engine(&mut self, config: MitoConfig) -> MitoEngine { let logstore = self.logstore.as_ref().unwrap().clone(); let object_store_manager = self.object_store_manager.as_ref().unwrap().clone(); - - MitoEngine::new(config, logstore, object_store_manager) + let data_home = self.data_home().display().to_string(); + MitoEngine::new(&data_home, config, logstore, object_store_manager) .await .unwrap() } @@ -164,9 +165,19 @@ impl TestEnv { let object_store_manager = Arc::new(object_store_manager); self.logstore = Some(logstore.clone()); self.object_store_manager = Some(object_store_manager.clone()); - MitoEngine::new_for_test(config, logstore, object_store_manager, manager, listener) - .await - .unwrap() + + let data_home = self.data_home().display().to_string(); + + MitoEngine::new_for_test( + &data_home, + config, + logstore, + object_store_manager, + manager, + listener, + ) + .await + .unwrap() } pub async fn create_engine_with_multiple_object_stores( @@ -195,9 +206,18 @@ impl TestEnv { let object_store_manager = Arc::new(object_store_manager); self.logstore = Some(logstore.clone()); self.object_store_manager = Some(object_store_manager.clone()); - MitoEngine::new_for_test(config, logstore, object_store_manager, manager, listener) - .await - .unwrap() + let data_home = self.data_home().display().to_string(); + + MitoEngine::new_for_test( + &data_home, + config, + logstore, + object_store_manager, + manager, + listener, + ) + .await + .unwrap() } /// Reopen the engine. @@ -205,6 +225,7 @@ impl TestEnv { engine.stop().await.unwrap(); MitoEngine::new( + &self.data_home().display().to_string(), config, self.logstore.clone().unwrap(), self.object_store_manager.clone().unwrap(), @@ -216,6 +237,7 @@ impl TestEnv { /// Open the engine. pub async fn open_engine(&mut self, config: MitoConfig) -> MitoEngine { MitoEngine::new( + &self.data_home().display().to_string(), config, self.logstore.clone().unwrap(), self.object_store_manager.clone().unwrap(), @@ -231,9 +253,11 @@ impl TestEnv { } /// Creates a new [WorkerGroup] with specific config under this env. 
- pub(crate) async fn create_worker_group(&self, config: MitoConfig) -> WorkerGroup { + pub(crate) async fn create_worker_group(&self, mut config: MitoConfig) -> WorkerGroup { let (log_store, object_store_manager) = self.create_log_and_object_store_manager().await; + let data_home = self.data_home().display().to_string(); + config.sanitize(&data_home).unwrap(); WorkerGroup::start( Arc::new(config), Arc::new(log_store), diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs index 445151f12f5a..4a0809939184 100644 --- a/src/mito2/src/test_util/scheduler_util.rs +++ b/src/mito2/src/test_util/scheduler_util.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use common_test_util::temp_dir::{create_temp_dir, TempDir}; use object_store::services::Fs; +use object_store::util::join_dir; use object_store::ObjectStore; use tokio::sync::mpsc::Sender; @@ -27,6 +28,7 @@ use crate::compaction::CompactionScheduler; use crate::flush::FlushScheduler; use crate::request::WorkerRequest; use crate::schedule::scheduler::{LocalScheduler, SchedulerRef}; +use crate::sst::index::intermediate::IntermediateManager; /// Scheduler mocker. pub(crate) struct SchedulerEnv { @@ -39,15 +41,20 @@ pub(crate) struct SchedulerEnv { impl SchedulerEnv { /// Creates a new mocker. - pub(crate) fn new() -> SchedulerEnv { + pub(crate) async fn new() -> SchedulerEnv { let path = create_temp_dir(""); + let path_str = path.path().display().to_string(); let mut builder = Fs::default(); - builder.root(path.path().to_str().unwrap()); + builder.root(&path_str); + + let intm_mgr = IntermediateManager::init_fs(join_dir(&path_str, "intm")) + .await + .unwrap(); let object_store = ObjectStore::new(builder).unwrap().finish(); - let access_layer = Arc::new(AccessLayer::new("", object_store.clone())); + let access_layer = Arc::new(AccessLayer::new("", object_store.clone(), intm_mgr)); SchedulerEnv { - path: create_temp_dir(""), + path, access_layer, scheduler: None, } diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs index 09cb59aa1b17..ef54b42514da 100644 --- a/src/mito2/src/worker.rs +++ b/src/mito2/src/worker.rs @@ -55,6 +55,7 @@ use crate::request::{ BackgroundNotify, DdlRequest, SenderDdlRequest, SenderWriteRequest, WorkerRequest, }; use crate::schedule::scheduler::{LocalScheduler, SchedulerRef}; +use crate::sst::index::intermediate::IntermediateManager; use crate::wal::Wal; /// Identifier for a worker. 
@@ -120,8 +121,15 @@ impl WorkerGroup { let write_buffer_manager = Arc::new(WriteBufferManagerImpl::new( config.global_write_buffer_size.as_bytes() as usize, )); + let intermediate_manager = + IntermediateManager::init_fs(&config.inverted_index.intermediate_path).await?; let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs)); - let write_cache = write_cache_from_config(&config, object_store_manager.clone()).await?; + let write_cache = write_cache_from_config( + &config, + object_store_manager.clone(), + intermediate_manager.clone(), + ) + .await?; let cache_manager = Arc::new( CacheManager::builder() .sst_meta_cache_size(config.sst_meta_cache_size.as_bytes()) @@ -142,6 +150,7 @@ impl WorkerGroup { scheduler: scheduler.clone(), listener: WorkerListener::default(), cache_manager: cache_manager.clone(), + intermediate_manager: intermediate_manager.clone(), } .start() }) @@ -222,7 +231,14 @@ impl WorkerGroup { )) }); let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs)); - let write_cache = write_cache_from_config(&config, object_store_manager.clone()).await?; + let intermediate_manager = + IntermediateManager::init_fs(&config.inverted_index.intermediate_path).await?; + let write_cache = write_cache_from_config( + &config, + object_store_manager.clone(), + intermediate_manager.clone(), + ) + .await?; let cache_manager = Arc::new( CacheManager::builder() .sst_meta_cache_size(config.sst_meta_cache_size.as_bytes()) @@ -231,7 +247,6 @@ impl WorkerGroup { .write_cache(write_cache) .build(), ); - let workers = (0..config.num_workers) .map(|id| { WorkerStarter { @@ -243,6 +258,7 @@ impl WorkerGroup { scheduler: scheduler.clone(), listener: WorkerListener::new(listener.clone()), cache_manager: cache_manager.clone(), + intermediate_manager: intermediate_manager.clone(), } .start() }) @@ -263,6 +279,7 @@ fn value_to_index(value: usize, num_workers: usize) -> usize { async fn write_cache_from_config( config: &MitoConfig, object_store_manager: ObjectStoreManagerRef, + intermediate_manager: IntermediateManager, ) -> Result<Option<WriteCacheRef>> { if !config.enable_experimental_write_cache { return Ok(None); @@ -275,6 +292,7 @@ async fn write_cache_from_config( &config.experimental_write_cache_path, object_store_manager, config.experimental_write_cache_size, + intermediate_manager, ) .await?; Ok(Some(Arc::new(cache))) @@ -290,6 +308,7 @@ struct WorkerStarter<S> { scheduler: SchedulerRef, listener: WorkerListener, cache_manager: CacheManagerRef, + intermediate_manager: IntermediateManager, } impl<S: LogStore> WorkerStarter<S> { @@ -323,6 +342,7 @@ impl<S: LogStore> WorkerStarter<S> { stalled_requests: StalledRequests::default(), listener: self.listener, cache_manager: self.cache_manager, + intermediate_manager: self.intermediate_manager, }; let handle = common_runtime::spawn_write(async move { worker_thread.run().await; @@ -479,6 +499,8 @@ struct RegionWorkerLoop<S> { listener: WorkerListener, /// Cache. cache_manager: CacheManagerRef, + /// Intermediate manager for inverted index. 
+ intermediate_manager: IntermediateManager, } impl<S: LogStore> RegionWorkerLoop<S> { diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs index 9841c4eb43f6..3622793273af 100644 --- a/src/mito2/src/worker/handle_catchup.rs +++ b/src/mito2/src/worker/handle_catchup.rs @@ -54,6 +54,7 @@ impl<S: LogStore> RegionWorkerLoop<S> { self.memtable_builder.clone(), self.object_store_manager.clone(), self.scheduler.clone(), + self.intermediate_manager.clone(), ) .cache(Some(self.cache_manager.clone())) .options(region.version().options.clone()) diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs index 0af60793a0e9..0a87ba2ed54c 100644 --- a/src/mito2/src/worker/handle_create.rs +++ b/src/mito2/src/worker/handle_create.rs @@ -61,6 +61,7 @@ impl<S: LogStore> RegionWorkerLoop<S> { self.memtable_builder.clone(), self.object_store_manager.clone(), self.scheduler.clone(), + self.intermediate_manager.clone(), ) .metadata(metadata) .parse_options(request.options)? diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs index a2a7f7d6609a..9163b6f174c6 100644 --- a/src/mito2/src/worker/handle_open.rs +++ b/src/mito2/src/worker/handle_open.rs @@ -68,6 +68,7 @@ impl<S: LogStore> RegionWorkerLoop<S> { self.memtable_builder.clone(), self.object_store_manager.clone(), self.scheduler.clone(), + self.intermediate_manager.clone(), ) .skip_wal_replay(request.skip_wal_replay) .parse_options(request.options)? diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index 620d1bf4c56c..cbe18e7fbf8a 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -777,6 +777,13 @@ sst_write_buffer_size = "8MiB" parallel_scan_channel_size = 32 allow_stale_entries = false +[datanode.region_engine.mito.inverted_index] +create_on_flush = "auto" +create_on_compaction = "auto" +apply_on_query = "auto" +mem_threshold_on_create = "64.0MiB" +intermediate_path = "" + [[datanode.region_engine]] [datanode.region_engine.file] diff --git a/tests/cases/standalone/common/insert/logical_metric_table.result b/tests/cases/standalone/common/insert/logical_metric_table.result index d3f59786830a..09ddac341c94 100644 --- a/tests/cases/standalone/common/insert/logical_metric_table.result +++ b/tests/cases/standalone/common/insert/logical_metric_table.result @@ -19,19 +19,14 @@ SELECT * from t1; | 1970-01-01T00:00:00 | 0.0 | host1 | +-------------------------+-----+-------+ --- TODO(ruihang): fix this. 
t2 should not contains data from t1 CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy"); Affected Rows: 0 SELECT * from t2; -+-------------------------+-----+-----+ -| ts | job | val | -+-------------------------+-----+-----+ -| 1970-01-01T00:00:00.001 | | 1.0 | -| 1970-01-01T00:00:00 | | 0.0 | -+-------------------------+-----+-----+ +++ +++ INSERT INTO t2 VALUES (0, 'job1', 0), (1, 'job2', 1); @@ -42,8 +37,6 @@ SELECT * from t2; +-------------------------+------+-----+ | ts | job | val | +-------------------------+------+-----+ -| 1970-01-01T00:00:00.001 | | 1.0 | -| 1970-01-01T00:00:00 | | 0.0 | | 1970-01-01T00:00:00.001 | job2 | 1.0 | | 1970-01-01T00:00:00 | job1 | 0.0 | +-------------------------+------+-----+ diff --git a/tests/cases/standalone/common/insert/logical_metric_table.sql b/tests/cases/standalone/common/insert/logical_metric_table.sql index 6583833de510..fa2b6e0b6d7a 100644 --- a/tests/cases/standalone/common/insert/logical_metric_table.sql +++ b/tests/cases/standalone/common/insert/logical_metric_table.sql @@ -6,7 +6,6 @@ INSERT INTO t1 VALUES (0, 0, 'host1'), (1, 1, 'host2'); SELECT * from t1; --- TODO(ruihang): fix this. t2 should not contains data from t1 CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy"); SELECT * from t2; diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs index 3df634d5f9b1..2378f088513e 100644 --- a/tests/runner/src/env.rs +++ b/tests/runner/src/env.rs @@ -322,7 +322,7 @@ impl Env { } } - /// Setup kafka wal cluster if needed. The conterpart is in [GreptimeDB::stop]. + /// Setup kafka wal cluster if needed. The counterpart is in [GreptimeDB::stop]. fn setup_wal(&self) { if matches!(self.wal, WalConfig::Kafka { needs_kafka_cluster, .. } if needs_kafka_cluster) { util::setup_wal();
feat
enable inverted index (#3158)
7c1c6e8b8c66c28e46a56f22b47660722520b555
2024-03-26 09:58:14
tison
refactor: try upgrade regex-automata (#3575)
false
diff --git a/Cargo.lock b/Cargo.lock index 5d491ee3d11a..85909a5f3fc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4378,7 +4378,7 @@ dependencies = [ "prost 0.12.3", "rand", "regex", - "regex-automata 0.2.0", + "regex-automata 0.4.3", "snafu", "tempfile", "tokio", @@ -7801,17 +7801,6 @@ dependencies = [ "regex-syntax 0.6.29", ] -[[package]] -name = "regex-automata" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9368763f5a9b804326f3af749e16f9abf378d227bcdee7634b13d8f17793782" -dependencies = [ - "fst", - "memchr", - "regex-syntax 0.6.29", -] - [[package]] name = "regex-automata" version = "0.4.3" diff --git a/Cargo.toml b/Cargo.toml index 350880e1bcc8..cebad1ef89b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,7 +125,7 @@ prost = "0.12" raft-engine = { version = "0.4.1", default-features = false } rand = "0.8" regex = "1.8" -regex-automata = { version = "0.2", features = ["transducer"] } +regex-automata = { version = "0.4" } reqwest = { version = "0.11", default-features = false, features = [ "json", "rustls-tls-native-roots", diff --git a/src/index/src/inverted_index/error.rs b/src/index/src/inverted_index/error.rs index c3c40dddeae7..d831eaa1b12c 100644 --- a/src/index/src/inverted_index/error.rs +++ b/src/index/src/inverted_index/error.rs @@ -113,7 +113,7 @@ pub enum Error { #[snafu(display("Failed to parse regex DFA"))] ParseDFA { #[snafu(source)] - error: Box<regex_automata::dfa::Error>, + error: Box<regex_automata::dfa::dense::BuildError>, location: Location, }, diff --git a/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs b/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs index d76b44fe9d4b..ed8d48323042 100644 --- a/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs +++ b/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs @@ -17,6 +17,10 @@ use std::mem::size_of; use fst::map::OpBuilder; use fst::{IntoStreamer, Streamer}; use regex_automata::dfa::dense::DFA; +use regex_automata::dfa::Automaton; +use regex_automata::util::primitives::StateID; +use regex_automata::util::start::Config; +use regex_automata::Anchored; use snafu::{ensure, ResultExt}; use crate::inverted_index::error::{ @@ -32,7 +36,53 @@ pub struct IntersectionFstApplier { ranges: Vec<Range>, /// A list of `Dfa` compiled from regular expression patterns. 
- dfas: Vec<DFA<Vec<u32>>>, + dfas: Vec<DfaFstAutomaton>, +} + +#[derive(Debug)] +struct DfaFstAutomaton(DFA<Vec<u32>>); + +impl fst::Automaton for DfaFstAutomaton { + type State = StateID; + + #[inline] + fn start(&self) -> Self::State { + let config = Config::new().anchored(Anchored::No); + self.0.start_state(&config).unwrap() + } + + #[inline] + fn is_match(&self, state: &Self::State) -> bool { + self.0.is_match_state(*state) + } + + #[inline] + fn can_match(&self, state: &Self::State) -> bool { + !self.0.is_dead_state(*state) + } + + #[inline] + fn accept_eof(&self, state: &StateID) -> Option<StateID> { + if self.0.is_match_state(*state) { + return Some(*state); + } + Some(self.0.next_eoi_state(*state)) + } + + #[inline] + fn accept(&self, state: &Self::State, byte: u8) -> Self::State { + if self.0.is_match_state(*state) { + return *state; + } + self.0.next_state(*state, byte) + } +} + +impl IntersectionFstApplier { + fn new(ranges: Vec<Range>, dfas: Vec<DFA<Vec<u32>>>) -> Self { + let dfas = dfas.into_iter().map(DfaFstAutomaton).collect(); + Self { ranges, dfas } + } } impl FstApplier for IntersectionFstApplier { @@ -86,7 +136,7 @@ impl FstApplier for IntersectionFstApplier { size += self.dfas.capacity() * size_of::<DFA<Vec<u32>>>(); for dfa in &self.dfas { - size += dfa.memory_usage(); + size += dfa.0.memory_usage(); } size } @@ -119,7 +169,7 @@ impl IntersectionFstApplier { } } - Ok(Self { dfas, ranges }) + Ok(Self::new(ranges, dfas)) } } @@ -365,18 +415,15 @@ mod tests { #[test] fn test_intersection_fst_applier_memory_usage() { - let applier = IntersectionFstApplier { - ranges: vec![], - dfas: vec![], - }; + let applier = IntersectionFstApplier::new(vec![], vec![]); assert_eq!(applier.memory_usage(), 0); let dfa = DFA::new("^abc$").unwrap(); assert_eq!(dfa.memory_usage(), 320); - let applier = IntersectionFstApplier { - ranges: vec![Range { + let applier = IntersectionFstApplier::new( + vec![Range { lower: Some(Bound { value: b"aa".to_vec(), inclusive: true, @@ -386,9 +433,8 @@ mod tests { inclusive: true, }), }], - dfas: vec![dfa], - }; - + vec![dfa], + ); assert_eq!( applier.memory_usage(), size_of::<Range>() + 4 + size_of::<DFA<Vec<u32>>>() + 320
refactor
try upgrade regex-automata (#3575)
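The commit above adapts regex-automata 0.4's dense DFA to the fst::Automaton interface by stepping DFA states byte by byte. Below is a minimal standalone sketch of that stepping logic, assuming the same 0.4.3 API surface the diff imports (start_state with util::start::Config, next_state, next_eoi_state, is_match_state, is_dead_state); it only illustrates the walk and is not the applier itself.

use regex_automata::dfa::dense::DFA;
use regex_automata::dfa::Automaton;
use regex_automata::util::start::Config;
use regex_automata::Anchored;

/// Walk a dense DFA over `input` the way the FST automaton in the diff does:
/// take an unanchored start state, feed each byte, then feed EOI so that the
/// (delayed) match state becomes visible.
fn dfa_matches(dfa: &DFA<Vec<u32>>, input: &[u8]) -> bool {
    let mut state = dfa
        .start_state(&Config::new().anchored(Anchored::No))
        .unwrap();
    for &byte in input {
        state = dfa.next_state(state, byte);
        if dfa.is_dead_state(state) {
            return false;
        }
    }
    dfa.is_match_state(dfa.next_eoi_state(state))
}

fn main() {
    // Same pattern as the unit test in the diff.
    let dfa = DFA::new("^abc$").unwrap();
    assert!(dfa_matches(&dfa, b"abc"));
    assert!(!dfa_matches(&dfa, b"abcd"));
}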
c75845c570ab3c7ddd34704cea84d575707bbd15
2023-04-23 08:55:38
Zheming Li
fix: wrong next column in manifest (#1440)
false
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs index eecb74ac081a..ae4a99f6ae4b 100644 --- a/src/mito/src/table.rs +++ b/src/mito/src/table.rs @@ -241,6 +241,8 @@ impl<R: Region> Table for MitoTable<R> { new_info.meta = new_meta; } } + // Do create_alter_operation first to bump next_column_id in meta. + let alter_op = create_alter_operation(table_name, &req.alter_kind, &mut new_info.meta)?; // Increase version of the table. new_info.ident.version = table_info.ident.version + 1; @@ -263,9 +265,7 @@ impl<R: Region> Table for MitoTable<R> { .map_err(BoxedError::new) .context(table_error::TableOperationSnafu)?; - if let Some(alter_op) = - create_alter_operation(table_name, &req.alter_kind, &mut new_info.meta)? - { + if let Some(alter_op) = alter_op { // TODO(yingwen): Error handling. Maybe the region need to provide a method to // validate the request first. let regions = self.regions(); diff --git a/tests/cases/distributed/alter/alter_table.result b/tests/cases/distributed/alter/alter_table.result new file mode 100644 index 000000000000..36269aad5c5f --- /dev/null +++ b/tests/cases/distributed/alter/alter_table.result @@ -0,0 +1,46 @@ +CREATE TABLE t(i INTEGER, j BIGINT TIME INDEX); + +Affected Rows: 0 + +DESC TABLE t; + ++-------+-------+------+---------+---------------+ +| Field | Type | Null | Default | Semantic Type | ++-------+-------+------+---------+---------------+ +| i | Int32 | YES | | FIELD | +| j | Int64 | NO | | TIME INDEX | ++-------+-------+------+---------+---------------+ + +ALTER TABLE t ADD COLUMN k INTEGER; + +Affected Rows: 0 + +DESC TABLE t; + ++-------+-------+------+---------+---------------+ +| Field | Type | Null | Default | Semantic Type | ++-------+-------+------+---------+---------------+ +| i | Int32 | YES | | FIELD | +| j | Int64 | NO | | TIME INDEX | +| k | Int32 | YES | | FIELD | ++-------+-------+------+---------+---------------+ + +ALTER TABLE t ADD COLUMN m INTEGER; + +Affected Rows: 0 + +DESC TABLE t; + ++-------+-------+------+---------+---------------+ +| Field | Type | Null | Default | Semantic Type | ++-------+-------+------+---------+---------------+ +| i | Int32 | YES | | FIELD | +| j | Int64 | NO | | TIME INDEX | +| k | Int32 | YES | | FIELD | +| m | Int32 | YES | | FIELD | ++-------+-------+------+---------+---------------+ + +DROP TABLE t; + +Affected Rows: 1 + diff --git a/tests/cases/distributed/alter/alter_table.sql b/tests/cases/distributed/alter/alter_table.sql new file mode 100644 index 000000000000..66c217743d36 --- /dev/null +++ b/tests/cases/distributed/alter/alter_table.sql @@ -0,0 +1,13 @@ +CREATE TABLE t(i INTEGER, j BIGINT TIME INDEX); + +DESC TABLE t; + +ALTER TABLE t ADD COLUMN k INTEGER; + +DESC TABLE t; + +ALTER TABLE t ADD COLUMN m INTEGER; + +DESC TABLE t; + +DROP TABLE t; diff --git a/tests/cases/standalone/alter/alter_table.result b/tests/cases/standalone/alter/alter_table.result new file mode 100644 index 000000000000..d213ab8a56f6 --- /dev/null +++ b/tests/cases/standalone/alter/alter_table.result @@ -0,0 +1,47 @@ +CREATE TABLE t(i INTEGER, j BIGINT TIME INDEX); + +Affected Rows: 0 + +DESC TABLE t; + ++-------+-------+------+---------+---------------+ +| Field | Type | Null | Default | Semantic Type | ++-------+-------+------+---------+---------------+ +| i | Int32 | YES | | FIELD | +| j | Int64 | NO | | TIME INDEX | ++-------+-------+------+---------+---------------+ + +ALTER TABLE t ADD COLUMN k INTEGER; + +Affected Rows: 0 + +DESC TABLE t; + ++-------+-------+------+---------+---------------+ +| Field | Type | 
Null | Default | Semantic Type | ++-------+-------+------+---------+---------------+ +| i | Int32 | YES | | FIELD | +| j | Int64 | NO | | TIME INDEX | +| k | Int32 | YES | | FIELD | ++-------+-------+------+---------+---------------+ + +-- SQLNESS ARG restart=true +ALTER TABLE t ADD COLUMN m INTEGER; + +Affected Rows: 0 + +DESC TABLE t; + ++-------+-------+------+---------+---------------+ +| Field | Type | Null | Default | Semantic Type | ++-------+-------+------+---------+---------------+ +| i | Int32 | YES | | FIELD | +| j | Int64 | NO | | TIME INDEX | +| k | Int32 | YES | | FIELD | +| m | Int32 | YES | | FIELD | ++-------+-------+------+---------+---------------+ + +DROP TABLE t; + +Affected Rows: 1 + diff --git a/tests/cases/standalone/alter/alter_table.sql b/tests/cases/standalone/alter/alter_table.sql new file mode 100644 index 000000000000..46165625dc45 --- /dev/null +++ b/tests/cases/standalone/alter/alter_table.sql @@ -0,0 +1,14 @@ +CREATE TABLE t(i INTEGER, j BIGINT TIME INDEX); + +DESC TABLE t; + +ALTER TABLE t ADD COLUMN k INTEGER; + +DESC TABLE t; + +-- SQLNESS ARG restart=true +ALTER TABLE t ADD COLUMN m INTEGER; + +DESC TABLE t; + +DROP TABLE t;
fix
wrong next column in manifest (#1440)
000df8cf1e37828845f615ce0f1d1a92a5fc5d6a
2023-07-04 17:02:02
Weny Xu
feat: add ddl client (#1856)
false
diff --git a/Cargo.lock b/Cargo.lock index 5c407f4d7d84..cf6085bdd8eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4108,7 +4108,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "greptime-proto" version = "0.1.0" -source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=7aeaeaba1e0ca6a5c736b6ab2eb63144ae3d284b#7aeaeaba1e0ca6a5c736b6ab2eb63144ae3d284b" +source = "git+https://github.com/WenyXu/greptime-proto.git?rev=aab7d9a35900f995f9328c8588781e4d75253cba#aab7d9a35900f995f9328c8588781e4d75253cba" dependencies = [ "prost", "serde", diff --git a/Cargo.toml b/Cargo.toml index be1554ce6795..415cc378d446 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,7 +73,7 @@ datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" } futures = "0.3" futures-util = "0.3" -greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "7aeaeaba1e0ca6a5c736b6ab2eb63144ae3d284b" } +greptime-proto = { git = "https://github.com/WenyXu/greptime-proto.git", rev = "aab7d9a35900f995f9328c8588781e4d75253cba" } itertools = "0.10" parquet = "40.0" paste = "1.0" diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs index 85a5051d8b0d..efd8fe6ec5cf 100644 --- a/src/meta-client/src/client.rs +++ b/src/meta-client/src/client.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod ddl; mod heartbeat; mod load_balance; mod lock; @@ -29,6 +30,7 @@ use common_meta::rpc::store::{ RangeRequest, RangeResponse, }; use common_telemetry::info; +use ddl::Client as DdlClient; use heartbeat::Client as HeartbeatClient; use lock::Client as LockClient; use router::Client as RouterClient; @@ -49,6 +51,7 @@ pub struct MetaClientBuilder { enable_router: bool, enable_store: bool, enable_lock: bool, + enable_ddl: bool, channel_manager: Option<ChannelManager>, } @@ -89,6 +92,13 @@ impl MetaClientBuilder { } } + pub fn enable_ddl(self) -> Self { + Self { + enable_ddl: true, + ..self + } + } + pub fn channel_manager(self, channel_manager: ChannelManager) -> Self { Self { channel_manager: Some(channel_manager), @@ -119,7 +129,10 @@ impl MetaClientBuilder { client.store = Some(StoreClient::new(self.id, self.role, mgr.clone())); } if self.enable_lock { - client.lock = Some(LockClient::new(self.id, self.role, mgr)); + client.lock = Some(LockClient::new(self.id, self.role, mgr.clone())); + } + if self.enable_ddl { + client.ddl = Some(DdlClient::new(self.id, self.role, mgr)); } client @@ -134,6 +147,7 @@ pub struct MetaClient { router: Option<RouterClient>, store: Option<StoreClient>, lock: Option<LockClient>, + ddl: Option<DdlClient>, } impl MetaClient { diff --git a/src/meta-client/src/client/ddl.rs b/src/meta-client/src/client/ddl.rs new file mode 100644 index 000000000000..510e0e6f2f8f --- /dev/null +++ b/src/meta-client/src/client/ddl.rs @@ -0,0 +1,145 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use api::v1::meta::ddl_task_client::DdlTaskClient; +use api::v1::meta::{ErrorCode, Role, SubmitDdlTaskRequest, SubmitDdlTaskResponse}; +use common_grpc::channel_manager::ChannelManager; +use snafu::{ensure, ResultExt}; +use tokio::sync::RwLock; +use tonic::transport::Channel; + +use crate::client::heartbeat::Inner as HeartbeatInner; +use crate::client::Id; +use crate::error; +use crate::error::Result; + +#[derive(Clone, Debug)] +// TODO(weny): removes this in following PRs. +#[allow(unused)] +pub struct Client { + inner: Arc<RwLock<Inner>>, +} + +// TODO(weny): removes this in following PRs. +#[allow(dead_code)] +impl Client { + pub fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self { + let inner = Arc::new(RwLock::new(Inner { + id, + role, + channel_manager: channel_manager.clone(), + heartbeat_inner: HeartbeatInner::new(id, role, channel_manager), + })); + + Self { inner } + } + + pub async fn start<U, A>(&mut self, urls: A) -> Result<()> + where + U: AsRef<str>, + A: AsRef<[U]>, + { + let mut inner = self.inner.write().await; + inner.start(urls).await + } + + pub async fn is_started(&self) -> bool { + let inner = self.inner.read().await; + inner.is_started() + } + + pub async fn submit_ddl_task( + &self, + req: SubmitDdlTaskRequest, + ) -> Result<SubmitDdlTaskResponse> { + let mut inner = self.inner.write().await; + inner.submit_ddl_task(req).await + } +} + +#[derive(Debug)] +// TODO(weny): removes this in following PRs. 
+#[allow(unused)] +struct Inner { + id: Id, + role: Role, + channel_manager: ChannelManager, + heartbeat_inner: HeartbeatInner, +} + +impl Inner { + async fn start<U, A>(&mut self, urls: A) -> Result<()> + where + U: AsRef<str>, + A: AsRef<[U]>, + { + ensure!( + !self.is_started(), + error::IllegalGrpcClientStateSnafu { + err_msg: "Router client already started", + } + ); + + self.heartbeat_inner.start(urls).await?; + Ok(()) + } + + fn make_client(&self, addr: impl AsRef<str>) -> Result<DdlTaskClient<Channel>> { + let channel = self + .channel_manager + .get(addr) + .context(error::CreateChannelSnafu)?; + + Ok(DdlTaskClient::new(channel)) + } + + #[inline] + fn is_started(&self) -> bool { + self.heartbeat_inner.is_started() + } + + pub async fn submit_ddl_task( + &mut self, + mut req: SubmitDdlTaskRequest, + ) -> Result<SubmitDdlTaskResponse> { + req.set_header(self.id, self.role); + + loop { + if let Some(leader) = &self.heartbeat_inner.get_leader() { + let mut client = self.make_client(leader)?; + let res = client + .submit_ddl_task(req.clone()) + .await + .context(error::TonicStatusSnafu)?; + + let res = res.into_inner(); + + if let Some(header) = res.header.as_ref() { + if let Some(err) = header.error.as_ref() { + if err.code == ErrorCode::NotLeader as i32 { + self.heartbeat_inner.ask_leader().await?; + continue; + } + } + } + + return Ok(res); + } else if let Err(err) = self.heartbeat_inner.ask_leader().await { + return Err(err); + } + } + } +} diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs index 1b563bc691b8..108ca85de110 100644 --- a/src/meta-client/src/client/heartbeat.rs +++ b/src/meta-client/src/client/heartbeat.rs @@ -133,7 +133,7 @@ impl Client { } #[derive(Debug)] -struct Inner { +pub(crate) struct Inner { id: Id, role: Role, channel_manager: ChannelManager, @@ -142,7 +142,16 @@ struct Inner { } impl Inner { - async fn start<U, A>(&mut self, urls: A) -> Result<()> + pub(crate) fn new(id: Id, role: Role, channel_manager: ChannelManager) -> Self { + Self { + id, + role, + channel_manager, + peers: HashSet::new(), + leader: None, + } + } + pub(crate) async fn start<U, A>(&mut self, urls: A) -> Result<()> where U: AsRef<str>, A: AsRef<[U]>, @@ -163,7 +172,11 @@ impl Inner { Ok(()) } - async fn ask_leader(&mut self) -> Result<()> { + pub(crate) fn get_leader(&self) -> Option<String> { + self.leader.clone() + } + + pub(crate) async fn ask_leader(&mut self) -> Result<()> { ensure!( self.is_started(), error::IllegalGrpcClientStateSnafu { @@ -242,7 +255,7 @@ impl Inner { } #[inline] - fn is_started(&self) -> bool { + pub(crate) fn is_started(&self) -> bool { !self.peers.is_empty() } }
feat
add ddl client (#1856)
f5eede4ce1ff76659d5cc45a06e4446ff826ce2f
2023-11-10 16:03:28
tison
feat: support prometheus format_query endpoint (#2731)
false
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs index 7ca2b4261d71..7a422ef9941d 100644 --- a/src/servers/src/http.rs +++ b/src/servers/src/http.rs @@ -66,7 +66,7 @@ use self::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb use crate::configurator::ConfiguratorRef; use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu}; use crate::http::prometheus::{ - instant_query, label_values_query, labels_query, range_query, series_query, + format_query, instant_query, label_values_query, labels_query, range_query, series_query, }; use crate::metrics::{ HTTP_TRACK_METRICS, METRIC_HTTP_REQUESTS_ELAPSED, METRIC_HTTP_REQUESTS_TOTAL, @@ -623,6 +623,10 @@ impl HttpServer { fn route_prometheus<S>(&self, prometheus_handler: PrometheusHandlerRef) -> Router<S> { Router::new() + .route( + "/format_query", + routing::post(format_query).get(format_query), + ) .route("/query", routing::post(instant_query).get(instant_query)) .route("/query_range", routing::post(range_query).get(range_query)) .route("/labels", routing::post(labels_query).get(labels_query)) diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs index 82389ba66d20..2aae0b3a0009 100644 --- a/src/servers/src/http/prometheus.rs +++ b/src/servers/src/http/prometheus.rs @@ -70,6 +70,7 @@ pub enum PrometheusResponse { Labels(Vec<String>), Series(Vec<HashMap<String, String>>), LabelValues(Vec<String>), + FormatQuery(String), } impl Default for PrometheusResponse { @@ -290,6 +291,33 @@ impl PrometheusJsonResponse { } } +#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)] +pub struct FormatQuery { + query: Option<String>, +} + +#[axum_macros::debug_handler] +pub async fn format_query( + State(_handler): State<PrometheusHandlerRef>, + Query(params): Query<InstantQuery>, + Extension(_query_ctx): Extension<QueryContextRef>, + Form(form_params): Form<InstantQuery>, +) -> Json<PrometheusJsonResponse> { + let _timer = crate::metrics::METRIC_HTTP_PROMQL_FORMAT_QUERY_ELAPSED.start_timer(); + + let query = params.query.or(form_params.query).unwrap_or_default(); + match promql_parser::parser::parse(&query) { + Ok(expr) => { + let pretty = expr.prettify(); + PrometheusJsonResponse::success(PrometheusResponse::FormatQuery(pretty)) + } + Err(reason) => { + let err = InvalidQuerySnafu { reason }.build(); + PrometheusJsonResponse::error(err.status_code().to_string(), err.output_msg()) + } + } +} + #[derive(Debug, Default, Serialize, Deserialize, JsonSchema)] pub struct InstantQuery { query: Option<String>, diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs index f5e0be116e72..0c630ad3ac0a 100644 --- a/src/servers/src/metrics.rs +++ b/src/servers/src/metrics.rs @@ -100,6 +100,11 @@ lazy_static! 
{ "servers opentsdb line write elapsed" ) .unwrap(); + pub static ref METRIC_HTTP_PROMQL_FORMAT_QUERY_ELAPSED: Histogram = register_histogram!( + "servers_http_promql_format_query_elapsed", + "servers http promql format query elapsed" + ) + .unwrap(); pub static ref METRIC_HTTP_PROMQL_INSTANT_QUERY_ELAPSED: Histogram = register_histogram!( "servers_http_promql_instant_query_elapsed", "servers http promql instant query elapsed" diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index e9af72ef9e70..6a54707a1ad3 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -341,6 +341,17 @@ pub async fn test_prom_http_api(store_type: StorageType) { let (app, mut guard) = setup_test_prom_app_with_frontend(store_type, "promql_api").await; let client = TestClient::new(app); + // format_query + let res = client + .get("/v1/prometheus/api/v1/format_query?query=foo/bar") + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.text().await.as_str(), + r#"{"status":"success","data":"foo / bar"}"# + ); + // instant query let res = client .get("/v1/prometheus/api/v1/query?query=up&time=1")
feat
support prometheus format_query endpoint (#2731)
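The handler added above does little more than parse the query parameter with promql_parser and return the prettified expression. A minimal sketch of that core step with the HTTP and metrics wrapping stripped away; it assumes, as the handler's error path suggests, that the parser's error displays as a plain reason string.

/// Mirrors the core of the `format_query` handler above: parse the PromQL
/// expression and pretty-print it, or surface the parser's reason on failure.
fn main() {
    match promql_parser::parser::parse("foo/bar") {
        // Per the integration test above, this prints "foo / bar".
        Ok(expr) => println!("{}", expr.prettify()),
        Err(reason) => eprintln!("invalid query: {reason}"),
    }
}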
1855dccdf19693983f5582429f01ba156d22d986
2025-01-14 14:30:08
ZonaHe
feat: update dashboard to v0.7.8 (#5355)
false
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION index b977a66d97cc..151a6866b0a4 100644 --- a/src/servers/dashboard/VERSION +++ b/src/servers/dashboard/VERSION @@ -1 +1 @@ -v0.7.7 +v0.7.8
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION
index b977a66d97cc..151a6866b0a4 100644
--- a/src/servers/dashboard/VERSION
+++ b/src/servers/dashboard/VERSION
@@ -1 +1 @@
-v0.7.7
+v0.7.8
feat
update dashboard to v0.7.8 (#5355)
82dbc3e1ae563171c0b3d282dc4fddb27f8da477
2023-10-23 07:52:19
Yingwen
feat(mito): Ports InMemoryRowGroup from parquet crate (#2633)
false
diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs index 872c0e410408..481f98f1af12 100644 --- a/src/mito2/src/sst/parquet.rs +++ b/src/mito2/src/sst/parquet.rs @@ -16,6 +16,7 @@ mod format; pub mod reader; +pub mod row_group; mod stats; pub mod writer; diff --git a/src/mito2/src/sst/parquet/row_group.rs b/src/mito2/src/sst/parquet/row_group.rs new file mode 100644 index 000000000000..a80f7c874253 --- /dev/null +++ b/src/mito2/src/sst/parquet/row_group.rs @@ -0,0 +1,230 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Ports private structs from [parquet crate](https://github.com/apache/arrow-rs/blob/7e134f4d277c0b62c27529fc15a4739de3ad0afd/parquet/src/arrow/async_reader/mod.rs#L644-L650). + +use std::sync::Arc; + +use bytes::{Buf, Bytes}; +use parquet::arrow::arrow_reader::{RowGroups, RowSelection}; +use parquet::arrow::async_reader::AsyncFileReader; +use parquet::arrow::ProjectionMask; +use parquet::column::page::{PageIterator, PageReader}; +use parquet::errors::{ParquetError, Result}; +use parquet::file::metadata::RowGroupMetaData; +use parquet::file::reader::{ChunkReader, Length}; +use parquet::file::serialized_reader::SerializedPageReader; +use parquet::format::PageLocation; + +/// An in-memory collection of column chunks +pub struct InMemoryRowGroup<'a> { + metadata: &'a RowGroupMetaData, + page_locations: Option<&'a [Vec<PageLocation>]>, + column_chunks: Vec<Option<Arc<ColumnChunkData>>>, + row_count: usize, +} + +impl<'a> InMemoryRowGroup<'a> { + /// Fetches the necessary column data into memory + // TODO(yingwen): Fix clippy warnings. + #[allow(clippy::filter_map_bool_then)] + #[allow(clippy::useless_conversion)] + pub async fn fetch<T: AsyncFileReader + Send>( + &mut self, + input: &mut T, + projection: &ProjectionMask, + selection: Option<&RowSelection>, + ) -> Result<()> { + if let Some((selection, page_locations)) = selection.zip(self.page_locations) { + // If we have a `RowSelection` and an `OffsetIndex` then only fetch pages required for the + // `RowSelection` + let mut page_start_offsets: Vec<Vec<usize>> = vec![]; + + let fetch_ranges = self + .column_chunks + .iter() + .zip(self.metadata.columns()) + .enumerate() + .filter_map(|(idx, (chunk, chunk_meta))| { + (chunk.is_none() && projection.leaf_included(idx)).then(|| { + // If the first page does not start at the beginning of the column, + // then we need to also fetch a dictionary page. 
+ let mut ranges = vec![]; + let (start, _len) = chunk_meta.byte_range(); + match page_locations[idx].first() { + Some(first) if first.offset as u64 != start => { + ranges.push(start as usize..first.offset as usize); + } + _ => (), + } + + ranges.extend(selection.scan_ranges(&page_locations[idx])); + page_start_offsets.push(ranges.iter().map(|range| range.start).collect()); + + ranges + }) + }) + .flatten() + .collect(); + + let mut chunk_data = input.get_byte_ranges(fetch_ranges).await?.into_iter(); + let mut page_start_offsets = page_start_offsets.into_iter(); + + for (idx, chunk) in self.column_chunks.iter_mut().enumerate() { + if chunk.is_some() || !projection.leaf_included(idx) { + continue; + } + + if let Some(offsets) = page_start_offsets.next() { + let mut chunks = Vec::with_capacity(offsets.len()); + for _ in 0..offsets.len() { + chunks.push(chunk_data.next().unwrap()); + } + + *chunk = Some(Arc::new(ColumnChunkData::Sparse { + length: self.metadata.column(idx).byte_range().1 as usize, + data: offsets.into_iter().zip(chunks.into_iter()).collect(), + })) + } + } + } else { + let fetch_ranges = self + .column_chunks + .iter() + .enumerate() + .filter_map(|(idx, chunk)| { + (chunk.is_none() && projection.leaf_included(idx)).then(|| { + let column = self.metadata.column(idx); + let (start, length) = column.byte_range(); + start as usize..(start + length) as usize + }) + }) + .collect(); + + let mut chunk_data = input.get_byte_ranges(fetch_ranges).await?.into_iter(); + + for (idx, chunk) in self.column_chunks.iter_mut().enumerate() { + if chunk.is_some() || !projection.leaf_included(idx) { + continue; + } + + if let Some(data) = chunk_data.next() { + *chunk = Some(Arc::new(ColumnChunkData::Dense { + offset: self.metadata.column(idx).byte_range().0 as usize, + data, + })); + } + } + } + + Ok(()) + } +} + +impl<'a> RowGroups for InMemoryRowGroup<'a> { + fn num_rows(&self) -> usize { + self.row_count + } + + fn column_chunks(&self, i: usize) -> Result<Box<dyn PageIterator>> { + match &self.column_chunks[i] { + None => Err(ParquetError::General(format!( + "Invalid column index {i}, column was not fetched" + ))), + Some(data) => { + let page_locations = self.page_locations.map(|index| index[i].clone()); + let page_reader: Box<dyn PageReader> = Box::new(SerializedPageReader::new( + data.clone(), + self.metadata.column(i), + self.row_count, + page_locations, + )?); + + Ok(Box::new(ColumnChunkIterator { + reader: Some(Ok(page_reader)), + })) + } + } + } +} + +/// An in-memory column chunk +#[derive(Clone)] +enum ColumnChunkData { + /// Column chunk data representing only a subset of data pages + Sparse { + /// Length of the full column chunk + length: usize, + /// Set of data pages included in this sparse chunk. Each element is a tuple + /// of (page offset, page data) + data: Vec<(usize, Bytes)>, + }, + /// Full column chunk and its offset + Dense { offset: usize, data: Bytes }, +} + +impl ColumnChunkData { + fn get(&self, start: u64) -> Result<Bytes> { + match &self { + ColumnChunkData::Sparse { data, .. } => data + .binary_search_by_key(&start, |(offset, _)| *offset as u64) + .map(|idx| data[idx].1.clone()) + .map_err(|_| { + ParquetError::General(format!( + "Invalid offset in sparse column chunk data: {start}" + )) + }), + ColumnChunkData::Dense { offset, data } => { + let start = start as usize - *offset; + Ok(data.slice(start..)) + } + } + } +} + +impl Length for ColumnChunkData { + fn len(&self) -> u64 { + match &self { + ColumnChunkData::Sparse { length, .. 
} => *length as u64, + ColumnChunkData::Dense { data, .. } => data.len() as u64, + } + } +} + +impl ChunkReader for ColumnChunkData { + type T = bytes::buf::Reader<Bytes>; + + fn get_read(&self, start: u64) -> Result<Self::T> { + Ok(self.get(start)?.reader()) + } + + fn get_bytes(&self, start: u64, length: usize) -> Result<Bytes> { + Ok(self.get(start)?.slice(..length)) + } +} + +/// Implements [`PageIterator`] for a single column chunk, yielding a single [`PageReader`] +struct ColumnChunkIterator { + reader: Option<Result<Box<dyn PageReader>>>, +} + +impl Iterator for ColumnChunkIterator { + type Item = Result<Box<dyn PageReader>>; + + fn next(&mut self) -> Option<Self::Item> { + self.reader.take() + } +} + +impl PageIterator for ColumnChunkIterator {}
feat
Ports InMemoryRowGroup from parquet crate (#2633)
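In the port above, the Sparse variant answers a page read by binary-searching the fetched (offset, data) pairs. A small self-contained sketch of just that lookup, using plain byte vectors instead of Bytes; the type and names here are illustrative stand-ins, not the ported structs.

/// A column chunk for which only some pages were fetched, each tagged with
/// the offset it starts at in the file (kept sorted by offset).
struct SparseChunk {
    pages: Vec<(u64, Vec<u8>)>,
}

impl SparseChunk {
    /// Return the page starting exactly at `offset`, mirroring the binary
    /// search in `ColumnChunkData::get` above.
    fn page_at(&self, offset: u64) -> Option<&[u8]> {
        self.pages
            .binary_search_by_key(&offset, |(off, _)| *off)
            .ok()
            .map(|idx| self.pages[idx].1.as_slice())
    }
}

fn main() {
    let chunk = SparseChunk {
        pages: vec![(0, vec![1, 2, 3]), (128, vec![4, 5])],
    };
    assert_eq!(chunk.page_at(128), Some(&[4u8, 5][..]));
    assert!(chunk.page_at(64).is_none());
}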
fcd0ceea94dab8f70be52c7f4b53e176f9b1e216
2024-11-07 18:55:05
Ruihang Xia
fix: column already exists (#4961)
false
diff --git a/src/metric-engine/src/engine/region_metadata.rs b/src/metric-engine/src/engine/region_metadata.rs index 171480c58978..9f00235e9682 100644 --- a/src/metric-engine/src/engine/region_metadata.rs +++ b/src/metric-engine/src/engine/region_metadata.rs @@ -14,6 +14,8 @@ //! Implementation of retrieving logical region's region metadata. +use std::collections::HashMap; + use store_api::metadata::ColumnMetadata; use store_api::storage::RegionId; @@ -46,23 +48,36 @@ impl MetricEngineInner { .read_lock_logical_region(logical_region_id) .await; // Load logical and physical columns, and intersect them to get logical column metadata. - let mut logical_column_metadata = self + let logical_column_metadata = self .metadata_region .logical_columns(physical_region_id, logical_region_id) .await? .into_iter() .map(|(_, column_metadata)| column_metadata) .collect::<Vec<_>>(); - // Sort columns on column name to ensure the order - logical_column_metadata - .sort_unstable_by(|c1, c2| c1.column_schema.name.cmp(&c2.column_schema.name)); + // Update cache - self.state - .write() - .unwrap() - .add_logical_columns(logical_region_id, logical_column_metadata.clone()); + let mut mutable_state = self.state.write().unwrap(); + // Merge with existing cached columns. + let existing_columns = mutable_state + .logical_columns() + .get(&logical_region_id) + .cloned() + .unwrap_or_default() + .into_iter(); + let mut dedup_columns = logical_column_metadata + .into_iter() + .chain(existing_columns) + .map(|c| (c.column_id, c)) + .collect::<HashMap<_, _>>() + .values() + .cloned() + .collect::<Vec<_>>(); + // Sort columns on column name to ensure the order + dedup_columns.sort_unstable_by(|c1, c2| c1.column_schema.name.cmp(&c2.column_schema.name)); + mutable_state.set_logical_columns(logical_region_id, dedup_columns.clone()); - Ok(logical_column_metadata) + Ok(dedup_columns) } /// Load logical column names of a logical region. diff --git a/src/metric-engine/src/engine/state.rs b/src/metric-engine/src/engine/state.rs index 24ab5a31bfd1..197e3f9da730 100644 --- a/src/metric-engine/src/engine/state.rs +++ b/src/metric-engine/src/engine/state.rs @@ -85,19 +85,13 @@ impl MetricEngineState { .insert(logical_region_id, physical_region_id); } - /// Add and reorder logical columns. - /// - /// Caller should make sure: - /// 1. there is no duplicate columns - /// 2. the column order is the same with the order in the metadata, which is - /// alphabetically ordered on column name. - pub fn add_logical_columns( + /// Replace the logical columns of the logical region with given columns. + pub fn set_logical_columns( &mut self, logical_region_id: RegionId, - new_columns: impl IntoIterator<Item = ColumnMetadata>, + columns: Vec<ColumnMetadata>, ) { - let columns = self.logical_columns.entry(logical_region_id).or_default(); - columns.extend(new_columns); + self.logical_columns.insert(logical_region_id, columns); } pub fn get_physical_region_id(&self, logical_region_id: RegionId) -> Option<RegionId> {
fix
column already exists (#4961)
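The fix above rebuilds the cached logical columns by merging the freshly loaded metadata with what is already cached, keeping a single entry per column id, and re-sorting by column name. A std-only sketch of that merge-dedup-sort step; Column is a stand-in for ColumnMetadata.

use std::collections::HashMap;

#[derive(Clone, Debug)]
struct Column {
    id: u32,
    name: String,
}

/// Merge loaded and cached columns, dedup by id, and sort by name.
/// Collecting into a HashMap keeps the last occurrence of each id,
/// so entries later in the chain win on conflict.
fn merge_columns(loaded: Vec<Column>, cached: Vec<Column>) -> Vec<Column> {
    let mut merged: Vec<Column> = loaded
        .into_iter()
        .chain(cached)
        .map(|c| (c.id, c))
        .collect::<HashMap<_, _>>()
        .into_values()
        .collect();
    merged.sort_unstable_by(|a, b| a.name.cmp(&b.name));
    merged
}

fn main() {
    let loaded = vec![
        Column { id: 1, name: "host".to_string() },
        Column { id: 2, name: "job".to_string() },
    ];
    let cached = vec![Column { id: 2, name: "job".to_string() }];
    let merged = merge_columns(loaded, cached);
    assert_eq!(merged.len(), 2);
    assert_eq!(merged[0].name, "host");
}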
22f31f592930828f6bc4a40f85720dd6acd93eb4
2024-11-07 08:31:12
jeremyhi
chore: paginated query region stats (#4942)
false
diff --git a/Cargo.lock b/Cargo.lock index 001c2821f332..6d406716eef1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6468,6 +6468,7 @@ dependencies = [ "common-telemetry", "datatypes", "futures", + "futures-util", "humantime-serde", "meta-srv", "rand", diff --git a/src/meta-client/Cargo.toml b/src/meta-client/Cargo.toml index 73042744990b..4d22fe4bd31d 100644 --- a/src/meta-client/Cargo.toml +++ b/src/meta-client/Cargo.toml @@ -15,6 +15,8 @@ common-grpc.workspace = true common-macro.workspace = true common-meta.workspace = true common-telemetry.workspace = true +futures.workspace = true +futures-util.workspace = true humantime-serde.workspace = true rand.workspace = true serde.workspace = true diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs index ed6fdf13fba3..723bb099ddd8 100644 --- a/src/meta-client/src/client.rs +++ b/src/meta-client/src/client.rs @@ -21,6 +21,8 @@ mod cluster; mod store; mod util; +use std::sync::Arc; + use api::v1::meta::{ProcedureDetailResponse, Role}; use cluster::Client as ClusterClient; use common_error::ext::BoxedError; @@ -30,7 +32,8 @@ use common_meta::cluster::{ }; use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue, RegionStat}; use common_meta::ddl::{ExecutorContext, ProcedureExecutor}; -use common_meta::error::{self as meta_error, Result as MetaResult}; +use common_meta::error::{self as meta_error, ExternalSnafu, Result as MetaResult}; +use common_meta::range_stream::PaginationStream; use common_meta::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse}; use common_meta::rpc::procedure::{ MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse, @@ -40,8 +43,10 @@ use common_meta::rpc::store::{ BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; +use common_meta::rpc::KeyValue; use common_meta::ClusterId; use common_telemetry::info; +use futures::TryStreamExt; use heartbeat::Client as HeartbeatClient; use procedure::Client as ProcedureClient; use snafu::{OptionExt, ResultExt}; @@ -314,16 +319,15 @@ impl ClusterInfo for MetaClient { } async fn list_region_stats(&self) -> Result<Vec<RegionStat>> { - let cluster_client = self.cluster_client()?; + let cluster_kv_backend = Arc::new(self.cluster_client()?); let range_prefix = DatanodeStatKey::key_prefix_with_cluster_id(self.id.0); let req = RangeRequest::new().with_prefix(range_prefix); - let mut datanode_stats = cluster_client - .range(req) - .await? 
- .kvs - .into_iter() - .map(|kv| DatanodeStatValue::try_from(kv.value).context(ConvertMetaRequestSnafu)) - .collect::<Result<Vec<_>>>()?; + let stream = PaginationStream::new(cluster_kv_backend, req, 256, Arc::new(decode_stats)) + .into_stream(); + let mut datanode_stats = stream + .try_collect::<Vec<_>>() + .await + .context(ConvertMetaResponseSnafu)?; let region_stats = datanode_stats .iter_mut() .flat_map(|datanode_stat| { @@ -336,6 +340,12 @@ impl ClusterInfo for MetaClient { } } +fn decode_stats(kv: KeyValue) -> MetaResult<DatanodeStatValue> { + DatanodeStatValue::try_from(kv.value) + .map_err(BoxedError::new) + .context(ExternalSnafu) +} + impl MetaClient { pub fn new(id: Id) -> Self { Self { diff --git a/src/meta-client/src/client/cluster.rs b/src/meta-client/src/client/cluster.rs index 60ce52ecb69b..b1c7ff1089a1 100644 --- a/src/meta-client/src/client/cluster.rs +++ b/src/meta-client/src/client/cluster.rs @@ -12,14 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::any::Any; use std::future::Future; use std::sync::Arc; use api::greptime_proto::v1; use api::v1::meta::cluster_client::ClusterClient; use api::v1::meta::{MetasrvNodeInfo, MetasrvPeersRequest, ResponseHeader, Role}; +use common_error::ext::BoxedError; use common_grpc::channel_manager::ChannelManager; -use common_meta::rpc::store::{BatchGetRequest, BatchGetResponse, RangeRequest, RangeResponse}; +use common_meta::error::{Error as MetaError, ExternalSnafu, Result as MetaResult}; +use common_meta::kv_backend::{KvBackend, TxnService}; +use common_meta::rpc::store::{ + BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, + BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, + RangeRequest, RangeResponse, +}; use common_telemetry::{info, warn}; use snafu::{ensure, ResultExt}; use tokio::sync::RwLock; @@ -79,6 +87,51 @@ impl Client { } } +impl TxnService for Client { + type Error = MetaError; +} + +#[async_trait::async_trait] +impl KvBackend for Client { + fn name(&self) -> &str { + "ClusterClientKvBackend" + } + + fn as_any(&self) -> &dyn Any { + self + } + + async fn range(&self, req: RangeRequest) -> MetaResult<RangeResponse> { + self.range(req) + .await + .map_err(BoxedError::new) + .context(ExternalSnafu) + } + + async fn put(&self, _: PutRequest) -> MetaResult<PutResponse> { + unimplemented!("`put` is not supported in cluster client kv backend") + } + + async fn batch_put(&self, _: BatchPutRequest) -> MetaResult<BatchPutResponse> { + unimplemented!("`batch_put` is not supported in cluster client kv backend") + } + + async fn batch_get(&self, req: BatchGetRequest) -> MetaResult<BatchGetResponse> { + self.batch_get(req) + .await + .map_err(BoxedError::new) + .context(ExternalSnafu) + } + + async fn delete_range(&self, _: DeleteRangeRequest) -> MetaResult<DeleteRangeResponse> { + unimplemented!("`delete_range` is not supported in cluster client kv backend") + } + + async fn batch_delete(&self, _: BatchDeleteRequest) -> MetaResult<BatchDeleteResponse> { + unimplemented!("`batch_delete` is not supported in cluster client kv backend") + } +} + #[derive(Debug)] struct Inner { id: Id,
chore
paginated query region stats (#4942)
7fad4e835673697ea3522e730fa643c7e3e709df
2024-01-10 19:29:49
Weny Xu
fix: incorrect parsing `broker_endpoints` env variable (#3135)
false
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs index 9ef9608f2497..97c26af0e1ed 100644 --- a/src/cmd/src/metasrv.rs +++ b/src/cmd/src/metasrv.rs @@ -128,7 +128,7 @@ impl StartCommand { let mut opts: MetaSrvOptions = Options::load_layered_options( self.config_file.as_deref(), self.env_prefix.as_ref(), - None, + MetaSrvOptions::env_list_keys(), )?; if let Some(dir) = &cli_options.log_dir { diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs index 6f0d62ef3598..8511dbf519c6 100644 --- a/src/cmd/src/standalone.rs +++ b/src/cmd/src/standalone.rs @@ -118,6 +118,12 @@ pub struct StandaloneOptions { pub export_metrics: ExportMetricsOption, } +impl StandaloneOptions { + pub fn env_list_keys() -> Option<&'static [&'static str]> { + Some(&["wal.broker_endpoints"]) + } +} + impl Default for StandaloneOptions { fn default() -> Self { Self { @@ -267,7 +273,7 @@ impl StartCommand { let opts: StandaloneOptions = Options::load_layered_options( self.config_file.as_deref(), self.env_prefix.as_ref(), - None, + StandaloneOptions::env_list_keys(), )?; self.convert_options(cli_options, opts) diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs index 3437a8ca6a4a..9f469f0f484b 100644 --- a/src/datanode/src/config.rs +++ b/src/datanode/src/config.rs @@ -276,7 +276,7 @@ impl Default for DatanodeOptions { impl DatanodeOptions { pub fn env_list_keys() -> Option<&'static [&'static str]> { - Some(&["meta_client.metasrv_addrs"]) + Some(&["meta_client.metasrv_addrs", "wal.broker_endpoints"]) } pub fn to_toml_string(&self) -> String { diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs index cb0f2cda4965..de8278df1ac0 100644 --- a/src/meta-srv/src/metasrv.rs +++ b/src/meta-srv/src/metasrv.rs @@ -79,6 +79,12 @@ pub struct MetaSrvOptions { pub store_key_prefix: String, } +impl MetaSrvOptions { + pub fn env_list_keys() -> Option<&'static [&'static str]> { + Some(&["wal.broker_endpoints"]) + } +} + impl Default for MetaSrvOptions { fn default() -> Self { Self {
fix
incorrect parsing `broker_endpoints` env variable (#3135)
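The fix above registers wal.broker_endpoints as a list-valued key so the layered-options loader turns its environment value into a vector of endpoints rather than a single string. A rough std-only illustration of that intent, assuming a comma-separated value; the actual splitting is done inside the options loader, and the exact environment variable name is not shown in the diff.

/// Split a comma-separated endpoint list, which is the shape the env value
/// for `wal.broker_endpoints` is expected to take once the key is declared
/// as a list key (illustrative only; the real loader handles this).
fn parse_endpoint_list(raw: &str) -> Vec<String> {
    raw.split(',')
        .map(|s| s.trim().to_string())
        .filter(|s| !s.is_empty())
        .collect()
}

fn main() {
    let endpoints = parse_endpoint_list("kafka1:9092, kafka2:9092");
    assert_eq!(endpoints, vec!["kafka1:9092".to_string(), "kafka2:9092".to_string()]);
}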
f4b2d393beeb82819c4feb717714b866f747a8ca
2024-12-26 10:08:45
Zhenchi
feat(config): add bloom filter config (#5237)
false
diff --git a/config/config.md b/config/config.md index 85f5e481afd5..3574be2411f9 100644 --- a/config/config.md +++ b/config/config.md @@ -157,6 +157,11 @@ | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | +| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. | +| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | +| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | +| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | +| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | @@ -486,6 +491,11 @@ | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | +| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. 
| +| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | +| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | +| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | +| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | diff --git a/config/datanode.example.toml b/config/datanode.example.toml index 1b062a4b3af1..05b55d6f7e35 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -576,6 +576,30 @@ apply_on_query = "auto" ## - `[size]` e.g. `64MB`: fixed memory threshold mem_threshold_on_create = "auto" +## The options for bloom filter index in Mito engine. +[region_engine.mito.bloom_filter_index] + +## Whether to create the index on flush. +## - `auto`: automatically (default) +## - `disable`: never +create_on_flush = "auto" + +## Whether to create the index on compaction. +## - `auto`: automatically (default) +## - `disable`: never +create_on_compaction = "auto" + +## Whether to apply the index on query +## - `auto`: automatically (default) +## - `disable`: never +apply_on_query = "auto" + +## Memory threshold for the index creation. +## - `auto`: automatically determine the threshold based on the system memory size (default) +## - `unlimited`: no memory limit +## - `[size]` e.g. `64MB`: fixed memory threshold +mem_threshold_on_create = "auto" + [region_engine.mito.memtable] ## Memtable type. ## - `time_series`: time-series memtable diff --git a/config/standalone.example.toml b/config/standalone.example.toml index 77445f8883bf..3e9cfc0694b8 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -619,6 +619,30 @@ apply_on_query = "auto" ## - `[size]` e.g. `64MB`: fixed memory threshold mem_threshold_on_create = "auto" +## The options for bloom filter in Mito engine. +[region_engine.mito.bloom_filter_index] + +## Whether to create the bloom filter on flush. +## - `auto`: automatically (default) +## - `disable`: never +create_on_flush = "auto" + +## Whether to create the bloom filter on compaction. +## - `auto`: automatically (default) +## - `disable`: never +create_on_compaction = "auto" + +## Whether to apply the bloom filter on query +## - `auto`: automatically (default) +## - `disable`: never +apply_on_query = "auto" + +## Memory threshold for bloom filter creation. +## - `auto`: automatically determine the threshold based on the system memory size (default) +## - `unlimited`: no memory limit +## - `[size]` e.g. 
`64MB`: fixed memory threshold +mem_threshold_on_create = "auto" + [region_engine.mito.memtable] ## Memtable type. ## - `time_series`: time-series memtable diff --git a/src/index/src/bloom_filter/creator.rs b/src/index/src/bloom_filter/creator.rs index da95334782a7..db79983e6274 100644 --- a/src/index/src/bloom_filter/creator.rs +++ b/src/index/src/bloom_filter/creator.rs @@ -320,7 +320,7 @@ mod tests { #[tokio::test] async fn test_bloom_filter_creator_batch_push() { let mut writer = Cursor::new(Vec::new()); - let mut creator = BloomFilterCreator::new( + let mut creator: BloomFilterCreator = BloomFilterCreator::new( 2, Arc::new(MockExternalTempFileProvider::new()), Arc::new(AtomicUsize::new(0)), diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs index 752b36fd1e0b..0d6204d02416 100644 --- a/src/mito2/src/access_layer.rs +++ b/src/mito2/src/access_layer.rs @@ -22,7 +22,7 @@ use store_api::metadata::RegionMetadataRef; use crate::cache::write_cache::SstUploadRequest; use crate::cache::CacheManagerRef; -use crate::config::{FulltextIndexConfig, InvertedIndexConfig}; +use crate::config::{BloomFilterConfig, FulltextIndexConfig, InvertedIndexConfig}; use crate::error::{CleanDirSnafu, DeleteIndexSnafu, DeleteSstSnafu, OpenDalSnafu, Result}; use crate::read::Source; use crate::region::options::IndexOptions; @@ -154,6 +154,7 @@ impl AccessLayer { index_options: request.index_options, inverted_index_config: request.inverted_index_config, fulltext_index_config: request.fulltext_index_config, + bloom_filter_index_config: request.bloom_filter_index_config, } .build() .await; @@ -198,6 +199,7 @@ pub(crate) struct SstWriteRequest { pub(crate) index_options: IndexOptions, pub(crate) inverted_index_config: InvertedIndexConfig, pub(crate) fulltext_index_config: FulltextIndexConfig, + pub(crate) bloom_filter_index_config: BloomFilterConfig, } pub(crate) async fn new_fs_cache_store(root: &str) -> Result<ObjectStore> { diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs index fc9972de5305..18fe41c5f614 100644 --- a/src/mito2/src/cache/write_cache.rs +++ b/src/mito2/src/cache/write_cache.rs @@ -125,6 +125,7 @@ impl WriteCache { index_options: write_request.index_options, inverted_index_config: write_request.inverted_index_config, fulltext_index_config: write_request.fulltext_index_config, + bloom_filter_index_config: write_request.bloom_filter_index_config, } .build() .await; @@ -378,6 +379,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: Default::default(), fulltext_index_config: Default::default(), + bloom_filter_index_config: Default::default(), }; let upload_request = SstUploadRequest { @@ -470,6 +472,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: Default::default(), fulltext_index_config: Default::default(), + bloom_filter_index_config: Default::default(), }; let write_opts = WriteOptions { row_group_size: 512, diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs index e7d5e779b675..58425f4d79e3 100644 --- a/src/mito2/src/compaction/compactor.rs +++ b/src/mito2/src/compaction/compactor.rs @@ -301,6 +301,8 @@ impl Compactor for DefaultCompactor { let merge_mode = compaction_region.current_version.options.merge_mode(); let inverted_index_config = compaction_region.engine_config.inverted_index.clone(); let fulltext_index_config = compaction_region.engine_config.fulltext_index.clone(); + let bloom_filter_index_config = + 
compaction_region.engine_config.bloom_filter_index.clone(); futs.push(async move { let reader = CompactionSstReaderBuilder { metadata: region_metadata.clone(), @@ -325,6 +327,7 @@ impl Compactor for DefaultCompactor { index_options, inverted_index_config, fulltext_index_config, + bloom_filter_index_config, }, &write_opts, ) diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs index 7a1574c850ae..1c06fb3f1aa0 100644 --- a/src/mito2/src/config.rs +++ b/src/mito2/src/config.rs @@ -117,6 +117,8 @@ pub struct MitoConfig { pub inverted_index: InvertedIndexConfig, /// Full-text index configs. pub fulltext_index: FulltextIndexConfig, + /// Bloom filter index configs. + pub bloom_filter_index: BloomFilterConfig, /// Memtable config pub memtable: MemtableConfig, @@ -155,6 +157,7 @@ impl Default for MitoConfig { index: IndexConfig::default(), inverted_index: InvertedIndexConfig::default(), fulltext_index: FulltextIndexConfig::default(), + bloom_filter_index: BloomFilterConfig::default(), memtable: MemtableConfig::default(), min_compaction_interval: Duration::from_secs(0), }; @@ -511,6 +514,48 @@ impl FulltextIndexConfig { } } +/// Configuration options for the bloom filter. +#[serde_as] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +#[serde(default)] +pub struct BloomFilterConfig { + /// Whether to create the index on flush: automatically or never. + pub create_on_flush: Mode, + /// Whether to create the index on compaction: automatically or never. + pub create_on_compaction: Mode, + /// Whether to apply the index on query: automatically or never. + pub apply_on_query: Mode, + /// Memory threshold for creating the index. + pub mem_threshold_on_create: MemoryThreshold, +} + +impl Default for BloomFilterConfig { + fn default() -> Self { + Self { + create_on_flush: Mode::Auto, + create_on_compaction: Mode::Auto, + apply_on_query: Mode::Auto, + mem_threshold_on_create: MemoryThreshold::Auto, + } + } +} + +impl BloomFilterConfig { + pub fn mem_threshold_on_create(&self) -> Option<usize> { + match self.mem_threshold_on_create { + MemoryThreshold::Auto => { + if let Some(sys_memory) = common_config::utils::get_sys_total_memory() { + Some((sys_memory / INDEX_CREATE_MEM_THRESHOLD_FACTOR).as_bytes() as usize) + } else { + Some(ReadableSize::mb(64).as_bytes() as usize) + } + } + MemoryThreshold::Unlimited => None, + MemoryThreshold::Size(size) => Some(size.as_bytes() as usize), + } + } +} + /// Divide cpu num by a non-zero `divisor` and returns at least 1. 
fn divide_num_cpus(divisor: usize) -> usize { debug_assert!(divisor > 0); diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs index 98160eadc46a..71caf363c02c 100644 --- a/src/mito2/src/engine.rs +++ b/src/mito2/src/engine.rs @@ -433,7 +433,7 @@ impl EngineInner { .with_parallel_scan_channel_size(self.config.parallel_scan_channel_size) .with_ignore_inverted_index(self.config.inverted_index.apply_on_query.disabled()) .with_ignore_fulltext_index(self.config.fulltext_index.apply_on_query.disabled()) - // .with_ignore_bloom_filter(self.config.bloom_filter_index.apply_on_query.disabled()) // TODO(ruihang): wait for #5237 + .with_ignore_bloom_filter(self.config.bloom_filter_index.apply_on_query.disabled()) .with_start_time(query_start); Ok(scan_region) diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs index 64a739068ad9..dd844a7d534c 100644 --- a/src/mito2/src/flush.rs +++ b/src/mito2/src/flush.rs @@ -360,6 +360,7 @@ impl RegionFlushTask { index_options: self.index_options.clone(), inverted_index_config: self.engine_config.inverted_index.clone(), fulltext_index_config: self.engine_config.fulltext_index.clone(), + bloom_filter_index_config: self.engine_config.bloom_filter_index.clone(), }; let Some(sst_info) = self .access_layer diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs index 2ce3367b409b..5cd99fe3778e 100644 --- a/src/mito2/src/read/scan_region.rs +++ b/src/mito2/src/read/scan_region.rs @@ -231,7 +231,6 @@ impl ScanRegion { /// Sets whether to ignore bloom filter. #[must_use] - #[allow(dead_code)] // TODO(ruihang): waiting for #5237 pub(crate) fn with_ignore_bloom_filter(mut self, ignore: bool) -> Self { self.ignore_bloom_filter = ignore; self diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs index 0b2822c04a7c..dc0f0978f84c 100644 --- a/src/mito2/src/sst/index.rs +++ b/src/mito2/src/sst/index.rs @@ -33,7 +33,7 @@ use store_api::metadata::RegionMetadataRef; use store_api::storage::{ColumnId, RegionId}; use crate::access_layer::OperationType; -use crate::config::{FulltextIndexConfig, InvertedIndexConfig}; +use crate::config::{BloomFilterConfig, FulltextIndexConfig, InvertedIndexConfig}; use crate::metrics::INDEX_CREATE_MEMORY_USAGE; use crate::read::Batch; use crate::region::options::IndexOptions; @@ -179,6 +179,7 @@ pub(crate) struct IndexerBuilder<'a> { pub(crate) index_options: IndexOptions, pub(crate) inverted_index_config: InvertedIndexConfig, pub(crate) fulltext_index_config: FulltextIndexConfig, + pub(crate) bloom_filter_index_config: BloomFilterConfig, } impl<'a> IndexerBuilder<'a> { @@ -320,7 +321,10 @@ impl<'a> IndexerBuilder<'a> { } fn build_bloom_filter_indexer(&self) -> Option<BloomFilterIndexer> { - let create = true; // TODO(zhongzc): add config for bloom filter + let create = match self.op_type { + OperationType::Flush => self.bloom_filter_index_config.create_on_flush.auto(), + OperationType::Compact => self.bloom_filter_index_config.create_on_compaction.auto(), + }; if !create { debug!( @@ -330,7 +334,7 @@ impl<'a> IndexerBuilder<'a> { return None; } - let mem_limit = Some(16 * 1024 * 1024); // TODO(zhongzc): add config for bloom filter + let mem_limit = self.bloom_filter_index_config.mem_threshold_on_create(); let indexer = BloomFilterIndexer::new( self.file_id, self.metadata, @@ -496,6 +500,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: InvertedIndexConfig::default(), fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: 
BloomFilterConfig::default(), } .build() .await; @@ -530,12 +535,14 @@ mod tests { ..Default::default() }, fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: BloomFilterConfig::default(), } .build() .await; assert!(indexer.inverted_indexer.is_none()); assert!(indexer.fulltext_indexer.is_some()); + assert!(indexer.bloom_filter_indexer.is_some()); let indexer = IndexerBuilder { op_type: OperationType::Compact, @@ -544,19 +551,44 @@ mod tests { metadata: &metadata, row_group_size: 1024, puffin_manager: factory.build(mock_object_store()), - intermediate_manager: intm_manager, + intermediate_manager: intm_manager.clone(), index_options: IndexOptions::default(), inverted_index_config: InvertedIndexConfig::default(), fulltext_index_config: FulltextIndexConfig { create_on_compaction: Mode::Disable, ..Default::default() }, + bloom_filter_index_config: BloomFilterConfig::default(), } .build() .await; assert!(indexer.inverted_indexer.is_some()); assert!(indexer.fulltext_indexer.is_none()); + assert!(indexer.bloom_filter_indexer.is_some()); + + let indexer = IndexerBuilder { + op_type: OperationType::Compact, + file_id: FileId::random(), + file_path: "test".to_string(), + metadata: &metadata, + row_group_size: 1024, + puffin_manager: factory.build(mock_object_store()), + intermediate_manager: intm_manager, + index_options: IndexOptions::default(), + inverted_index_config: InvertedIndexConfig::default(), + fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: BloomFilterConfig { + create_on_compaction: Mode::Disable, + ..Default::default() + }, + } + .build() + .await; + + assert!(indexer.inverted_indexer.is_some()); + assert!(indexer.fulltext_indexer.is_some()); + assert!(indexer.bloom_filter_indexer.is_none()); } #[tokio::test] @@ -581,6 +613,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: InvertedIndexConfig::default(), fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: BloomFilterConfig::default(), } .build() .await; @@ -605,6 +638,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: InvertedIndexConfig::default(), fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: BloomFilterConfig::default(), } .build() .await; @@ -629,6 +663,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: InvertedIndexConfig::default(), fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: BloomFilterConfig::default(), } .build() .await; @@ -660,6 +695,7 @@ mod tests { index_options: IndexOptions::default(), inverted_index_config: InvertedIndexConfig::default(), fulltext_index_config: FulltextIndexConfig::default(), + bloom_filter_index_config: BloomFilterConfig::default(), } .build() .await; diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index fb2824790848..1e1de235a88b 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -955,6 +955,12 @@ apply_on_query = "auto" mem_threshold_on_create = "auto" compress = true +[region_engine.mito.bloom_filter_index] +create_on_flush = "auto" +create_on_compaction = "auto" +apply_on_query = "auto" +mem_threshold_on_create = "auto" + [region_engine.mito.memtable] type = "time_series"
feat
add bloom filter config (#5237)
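The `BloomFilterConfig` added in the diff above follows the same pattern as the other index configs: `create_on_flush`, `create_on_compaction`, and `apply_on_query` switch between `auto` and `disable`, and `mem_threshold_on_create` resolves to a byte budget taken from a share of system memory, an explicit size, or no limit. Below is a minimal, self-contained sketch of that resolution logic, assuming simplified stand-ins for the crate's `Mode`, `MemoryThreshold`, and `ReadableSize` types and for its `INDEX_CREATE_MEM_THRESHOLD_FACTOR` constant (here a plain `divisor` parameter); it is not the engine's actual code.

```rust
// Simplified stand-in for the `auto` / `disable` switch used by the index configs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    Auto,
    Disable,
}

// Simplified stand-in for the memory budget setting.
#[derive(Debug, Clone, Copy)]
enum MemoryThreshold {
    /// Derive the budget from total system memory.
    Auto,
    /// No limit.
    Unlimited,
    /// Fixed budget in bytes.
    Size(u64),
}

/// Mirrors the `create_on_flush` / `create_on_compaction` checks: only `auto` creates.
fn should_create(mode: Mode) -> bool {
    mode == Mode::Auto
}

/// Resolves the threshold to an optional byte count: a share of system memory for
/// `auto` (falling back to 64 MiB when the total is unknown), `None` for `unlimited`,
/// and the given size otherwise.
fn resolve_mem_threshold(
    threshold: MemoryThreshold,
    sys_total_memory: Option<u64>,
    divisor: u64,
) -> Option<usize> {
    match threshold {
        MemoryThreshold::Auto => Some(
            sys_total_memory
                .map(|total| total / divisor)
                .unwrap_or(64 * 1024 * 1024) as usize,
        ),
        MemoryThreshold::Unlimited => None,
        MemoryThreshold::Size(bytes) => Some(bytes as usize),
    }
}

fn main() {
    assert!(should_create(Mode::Auto));
    assert!(!should_create(Mode::Disable));

    // 16 GiB of system memory and a divisor of 16 yields a 1 GiB budget.
    let budget =
        resolve_mem_threshold(MemoryThreshold::Auto, Some(16 * 1024 * 1024 * 1024), 16);
    assert_eq!(budget, Some(1024 * 1024 * 1024));

    // An explicit size is used as-is; `unlimited` disables the budget entirely.
    assert_eq!(
        resolve_mem_threshold(MemoryThreshold::Size(32 * 1024 * 1024), None, 16),
        Some(32 * 1024 * 1024)
    );
    assert_eq!(resolve_mem_threshold(MemoryThreshold::Unlimited, None, 16), None);
}
```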
c8ed1bbfae3d66298cdb911f2808c47e120d5346
2023-07-10 14:23:38
Weny Xu
fix: cast orc data against output schema (#1922)
false
diff --git a/src/common/datasource/src/file_format/orc.rs b/src/common/datasource/src/file_format/orc.rs index fb228ee1dbca..7b9858661a43 100644 --- a/src/common/datasource/src/file_format/orc.rs +++ b/src/common/datasource/src/file_format/orc.rs @@ -15,6 +15,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; +use arrow::compute::cast; use arrow_schema::{Schema, SchemaRef}; use async_trait::async_trait; use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch; @@ -60,12 +61,16 @@ pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static> } pub struct OrcArrowStreamReaderAdapter<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> { + output_schema: SchemaRef, stream: ArrowStreamReader<T>, } impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> OrcArrowStreamReaderAdapter<T> { - pub fn new(stream: ArrowStreamReader<T>) -> Self { - Self { stream } + pub fn new(output_schema: SchemaRef, stream: ArrowStreamReader<T>) -> Self { + Self { + stream, + output_schema, + } } } @@ -73,7 +78,7 @@ impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> RecordBatchStream for OrcArrowStreamReaderAdapter<T> { fn schema(&self) -> SchemaRef { - self.stream.schema() + self.output_schema.clone() } } @@ -83,6 +88,24 @@ impl<T: AsyncRead + AsyncSeek + Unpin + Send + 'static> Stream for OrcArrowStrea fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let batch = futures::ready!(Pin::new(&mut self.stream).poll_next(cx)) .map(|r| r.map_err(|e| DataFusionError::External(Box::new(e)))); + + let batch = batch.map(|b| { + b.and_then(|b| { + let mut columns = Vec::with_capacity(b.num_columns()); + for (idx, column) in b.columns().iter().enumerate() { + if column.data_type() != self.output_schema.field(idx).data_type() { + let output = cast(&column, self.output_schema.field(idx).data_type())?; + columns.push(output) + } else { + columns.push(column.clone()) + } + } + let record_batch = DfRecordBatch::try_new(self.output_schema.clone(), columns)?; + + Ok(record_batch) + }) + }); + Poll::Ready(batch) } } diff --git a/src/frontend/src/statement/copy_table_from.rs b/src/frontend/src/statement/copy_table_from.rs index b79bc1d45d14..45807a08f653 100644 --- a/src/frontend/src/statement/copy_table_from.rs +++ b/src/frontend/src/statement/copy_table_from.rs @@ -224,7 +224,7 @@ impl StatementExecutor { let stream = new_orc_stream_reader(reader) .await .context(error::ReadOrcSnafu)?; - let stream = OrcArrowStreamReaderAdapter::new(stream); + let stream = OrcArrowStreamReaderAdapter::new(schema, stream); Ok(Box::pin(stream)) }
fix
cast orc data against output schema (#1922)
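The adapter change above casts each column to the declared output schema before rebuilding the record batch, so downstream readers only ever see the table's types even when the ORC file stores a narrower type. The sketch below isolates that per-column cast using arrow-rs's `cast` kernel directly; the schemas and the `cast_to_output_schema` helper are illustrative, and it assumes a recent arrow-rs where `cast` accepts `&dyn Array`, not the ORC reader's actual code path.

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, Int32Array};
use arrow::compute::cast;
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;

/// Casts every column whose type differs from the output schema, then rebuilds the
/// batch against the output schema, mirroring the adapter's poll_next logic.
fn cast_to_output_schema(
    batch: &RecordBatch,
    output_schema: &SchemaRef,
) -> Result<RecordBatch, ArrowError> {
    let mut columns = Vec::with_capacity(batch.num_columns());
    for (idx, column) in batch.columns().iter().enumerate() {
        let target = output_schema.field(idx).data_type();
        if column.data_type() != target {
            columns.push(cast(column.as_ref(), target)?);
        } else {
            columns.push(column.clone());
        }
    }
    RecordBatch::try_new(output_schema.clone(), columns)
}

fn main() -> Result<(), ArrowError> {
    // The "file" schema says Int32 while the table expects Int64.
    let file_schema = Arc::new(Schema::new(vec![Field::new("v", DataType::Int32, false)]));
    let output_schema: SchemaRef =
        Arc::new(Schema::new(vec![Field::new("v", DataType::Int64, false)]));

    let column: ArrayRef = Arc::new(Int32Array::from(vec![1, 2, 3]));
    let batch = RecordBatch::try_new(file_schema, vec![column])?;

    let casted = cast_to_output_schema(&batch, &output_schema)?;
    assert_eq!(casted.schema(), output_schema);
    assert_eq!(casted.column(0).data_type(), &DataType::Int64);
    Ok(())
}
```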
dee5ccec9e2cfd53fee725105e032820b1682732
2022-11-18 09:18:29
zyy17
ci: add nightly build job (#565)
false
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c079cc02c809..ad6034972a77 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,6 +2,10 @@ on: push: tags: - "v*.*.*" + schedule: + # At 00:00 Everyday + # https://crontab.guru/every-day-at-midnight + - cron: '0 0 * * *' workflow_dispatch: name: Release @@ -9,6 +13,9 @@ name: Release env: RUST_TOOLCHAIN: nightly-2022-07-14 + # FIXME(zyy17): It's better to fetch the latest tag from git, but for a long time, we will stay at 'v0.1.0-alpha-*' + NIGHTLY_BUILD_VERSION_PREFIX: v0.1.0-alpha + jobs: build: name: Build binary @@ -106,8 +113,25 @@ jobs: - name: Download artifacts uses: actions/download-artifact@v3 + - name: Configure nigthly build version # the version would be ${NIGHTLY_BUILD_VERSION_PREFIX}-YYYYMMDD-nightly, like v0.1.0-alpha-20221119-nightly. + shell: bash + if: github.event_name == 'schedule' + run: | + buildTime=`date "+%Y%m%d"` + NIGHTLY_VERSION=${{ env.NIGHTLY_BUILD_VERSION_PREFIX }}-$buildTime-nigthly + echo "VERSION=${NIGHTLY_VERSION}" >> $GITHUB_ENV + + - name: Publish nigthly release # configure the different release title. + uses: softprops/action-gh-release@v1 + if: github.event_name == 'schedule' + with: + name: "Release ${{ env.NIGHTLY_VERSION }}" + files: | + **/greptime-* + - name: Publish release uses: softprops/action-gh-release@v1 + if: github.event_name != 'schedule' with: name: "Release ${{ github.ref_name }}" files: | @@ -158,8 +182,17 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Configure nigthly build image tag # the tag would be ${NIGHTLY_BUILD_VERSION_PREFIX}-YYYYMMDD-nightly + shell: bash + if: github.event_name == 'schedule' + run: | + buildTime=`date "+%Y%m%d"` + VERSION=${{ env.NIGHTLY_BUILD_VERSION_PREFIX }}-$buildTime-nigthly + echo "VERSION=${VERSION:1}" >> $GITHUB_ENV + - name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0. shell: bash + if: github.event_name != 'schedule' run: | VERSION=${{ github.ref_name }} echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
ci
add nightly build job (#565)
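The workflow above derives a date-stamped nightly version from `NIGHTLY_BUILD_VERSION_PREFIX` and strips the leading `v` for the Docker image tag (the `${VERSION:1}` step). The sketch below mirrors the intended naming scheme described in the workflow comments (`${NIGHTLY_BUILD_VERSION_PREFIX}-YYYYMMDD-nightly`) outside of CI; the functions and the hard-coded date are illustrative only, and the date is passed as a string to keep the example dependency-free.

```rust
/// Builds the nightly release name, e.g. `v0.1.0-alpha-20221119-nightly`.
fn nightly_release_name(prefix: &str, yyyymmdd: &str) -> String {
    format!("{prefix}-{yyyymmdd}-nightly")
}

/// Docker image tags drop the leading `v`, mirroring `${VERSION:1}` in the workflow.
fn image_tag(release_name: &str) -> &str {
    release_name.strip_prefix('v').unwrap_or(release_name)
}

fn main() {
    let release = nightly_release_name("v0.1.0-alpha", "20221119");
    assert_eq!(release, "v0.1.0-alpha-20221119-nightly");
    assert_eq!(image_tag(&release), "0.1.0-alpha-20221119-nightly");
    println!("{release} -> {}", image_tag(&release));
}
```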
43391e0162cce882bd12832c5b514ad7d7d77db4
2023-04-14 08:36:01
Ning Sun
chore: update pgwire and rustls libraries (#1380)
false
diff --git a/Cargo.lock b/Cargo.lock index ff82495aebe4..d17a9a2262c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2479,6 +2479,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b10af9f9f9f2134a42d3f8aa74658660f2e0234b0eb81bd171df8aa32779ed" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "derive-new" version = "0.5.9" @@ -2958,7 +2968,6 @@ dependencies = [ "query", "rstest", "rstest_reuse", - "rustls", "script", "serde", "serde_json", @@ -3466,9 +3475,9 @@ checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", - "rustls", + "rustls 0.20.8", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", ] [[package]] @@ -3679,7 +3688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.0", - "pem", + "pem 1.1.1", "ring", "serde", "serde_json", @@ -3817,9 +3826,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.140" +version = "0.2.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" [[package]] name = "libloading" @@ -3966,18 +3975,18 @@ dependencies = [ [[package]] name = "lru" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +checksum = "71e7d46de488603ffdd5f30afbc64fbba2378214a2c3a2fb83abf3d33126df17" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] name = "lru" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e7d46de488603ffdd5f30afbc64fbba2378214a2c3a2fb83abf3d33126df17" +checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" dependencies = [ "hashbrown 0.13.2", ] @@ -4364,11 +4373,29 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +[[package]] +name = "mysql-common-derive" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1f30203977ce6134381bd895ba82892f967578442a0894484858594de992" +dependencies = [ + "darling", + "heck 0.4.1", + "num-bigint", + "proc-macro-crate 1.3.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "termcolor", + "thiserror", +] + [[package]] name = "mysql_async" -version = "0.31.3" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2975442c70450b8f3a0400216321f6ab7b8bda177579f533d312ac511f913655" +checksum = "7089295150273e5d211a11222dcae3974c9aa4f7691c288e10e2e8aa43b3b1e9" dependencies = [ "bytes", "crossbeam", @@ -4377,39 +4404,39 @@ dependencies = [ "futures-sink", "futures-util", "lazy_static", - "lru 0.8.1", + "lru 0.10.0", "mio", "mysql_common", "once_cell", - "pem", + "pem 2.0.1", "percent-encoding", "pin-project", "priority-queue", - "rustls", + "rustls 0.21.0", "rustls-pemfile", "serde", "serde_json", - "socket2 0.4.9", + "socket2 0.5.2", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.0", "tokio-util", "twox-hash", "url", "webpki", - "webpki-roots", + "webpki-roots 0.23.0", ] 
[[package]] name = "mysql_common" -version = "0.29.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9006c95034ccf7b903d955f210469119f6c3477fc9c9e7a7845ce38a3e665c2a" +checksum = "92439c97246ac4c7b3172e6adc45a75205a45e805979319e25a75a376a3f910d" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "bigdecimal", "bindgen", - "bitflags 1.3.2", + "bitflags 2.0.2", "bitvec", "byteorder", "bytes", @@ -4421,6 +4448,7 @@ dependencies = [ "frunk", "lazy_static", "lexical", + "mysql-common-derive", "num-bigint", "num-traits", "rand", @@ -4791,8 +4819,9 @@ dependencies = [ [[package]] name = "opensrv-mysql" -version = "0.3.0" -source = "git+https://github.com/sunng87/opensrv?branch=fix/buffer-overread#d5c24b25543ba48b69c3c4fe97f71e499819bd99" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e34be5f325aa2d88f9770cea96539b58297b83cc7f80b2b317711af787f6c5a7" dependencies = [ "async-trait", "byteorder", @@ -4801,7 +4830,7 @@ dependencies = [ "nom", "pin-project-lite", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.0", ] [[package]] @@ -5059,6 +5088,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" +dependencies = [ + "base64 0.21.0", + "serde", +] + [[package]] name = "pem-rfc7468" version = "0.6.0" @@ -5130,9 +5169,9 @@ dependencies = [ [[package]] name = "pgwire" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7384deed2eb0a2372259f454f1071561b3fc9ec6742fe3a8a2d2a72e8b28d9ff" +checksum = "3da58f2d096a2b20ee96420524c6156425575b64409a25acdf638bff450cf53a" dependencies = [ "async-trait", "base64 0.21.0", @@ -5151,7 +5190,7 @@ dependencies = [ "thiserror", "time 0.3.17", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.0", "tokio-util", "x509-certificate", ] @@ -5277,9 +5316,9 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eff33bdbdfc54cc98a2eca766ebdec3e1b8fb7387523d5c9c9a2891da856f719" dependencies = [ - "der", + "der 0.6.1", "pkcs8", - "spki", + "spki 0.6.0", "zeroize", ] @@ -5289,8 +5328,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -6106,14 +6145,14 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.20.8", "rustls-native-certs", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-util", "tower-service", "url", @@ -6400,6 +6439,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07180898a28ed6a7f7ba2311594308f595e3dd2e3c3812fa0a80a47b45f17e5d" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + [[package]] name = "rustls-native-certs" version = "0.6.2" @@ -6421,6 +6472,16 @@ dependencies = [ "base64 0.21.0", ] +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", +] + 
[[package]] name = "rustpython-ast" version = "0.1.0" @@ -7163,7 +7224,7 @@ dependencies = [ "rand", "regex", "rust-embed", - "rustls", + "rustls 0.21.0", "rustls-pemfile", "schemars", "script", @@ -7179,7 +7240,7 @@ dependencies = [ "tokio", "tokio-postgres", "tokio-postgres-rustls", - "tokio-rustls", + "tokio-rustls 0.24.0", "tokio-stream", "tokio-test", "tonic", @@ -7399,12 +7460,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +checksum = "6d283f86695ae989d1e18440a943880967156325ba025f05049946bff47bcc2b" dependencies = [ "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -7432,7 +7493,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", +] + +[[package]] +name = "spki" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37a5be806ab6f127c3da44b7378837ebf01dadca8510a0e572460216b228bd0e" +dependencies = [ + "base64ct", + "der 0.7.3", ] [[package]] @@ -8252,7 +8323,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "socket2 0.5.1", + "socket2 0.5.2", "tokio", "tokio-util", ] @@ -8260,15 +8331,14 @@ dependencies = [ [[package]] name = "tokio-postgres-rustls" version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "606f2b73660439474394432239c82249c0d45eb5f23d91f401be1e33590444a7" +source = "git+https://github.com/sunng87/tokio-postgres-rustls.git?branch=patch-1#9f6e8a1c11e33c43a80618acd6b5135a7fb9a4be" dependencies = [ "futures", "ring", - "rustls", + "rustls 0.21.0", "tokio", "tokio-postgres", - "tokio-rustls", + "tokio-rustls 0.24.0", ] [[package]] @@ -8277,11 +8347,21 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls", + "rustls 0.20.8", "tokio", "webpki", ] +[[package]] +name = "tokio-rustls" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +dependencies = [ + "rustls 0.21.0", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.12" @@ -8371,7 +8451,7 @@ dependencies = [ "prost-derive", "rustls-pemfile", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-stream", "tokio-util", "tower", @@ -8912,11 +8992,11 @@ dependencies = [ "base64 0.13.1", "log", "once_cell", - "rustls", + "rustls 0.20.8", "rustls-native-certs", "url", "webpki", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] @@ -9162,6 +9242,15 @@ dependencies = [ "webpki", ] +[[package]] +name = "webpki-roots" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa54963694b65584e170cf5dc46aeb4dcaa5584e652ff5f3952e56d66aff0125" +dependencies = [ + "rustls-webpki", +] + [[package]] name = "which" version = "4.4.0" @@ -9239,7 +9328,7 @@ version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", ] [[package]] @@ -9248,12 +9337,12 @@ version = "0.42.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] @@ -9263,7 +9352,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] @@ -9272,21 +9370,42 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.39.0" @@ -9299,6 +9418,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.39.0" @@ -9311,6 +9436,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.39.0" @@ -9323,6 +9454,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.39.0" @@ -9335,12 +9472,24 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.39.0" @@ -9353,6 +9502,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winnow" version = "0.4.1" @@ -9382,19 +9537,19 @@ dependencies = [ [[package]] name = "x509-certificate" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6ae06cd45e681e1ae216e2e668e30ce1c4f02db026374bde8c0644684af1721" +checksum = "bf14059fbc1dce14de1d08535c411ba0b18749c2550a12550300da90b7ba350b" dependencies = [ "bcder", "bytes", "chrono", - "der", + "der 0.7.3", "hex", - "pem", + "pem 1.1.1", "ring", "signature", - "spki", + "spki 0.7.1", "thiserror", ] diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml index b9a12b1027eb..ce22422c5bd2 100644 --- a/src/frontend/Cargo.toml +++ b/src/frontend/Cargo.toml @@ -40,7 +40,6 @@ openmetrics-parser = "0.4" partition = { path = "../partition" } prost.workspace = true query = { path = "../query" } -rustls = "0.20" script = { path = "../script", features = ["python"], optional = true } serde = "1.0" serde_json = "1.0" diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml index 2156dbf87619..78a123aafa24 100644 --- a/src/servers/Cargo.toml +++ b/src/servers/Cargo.toml @@ -44,9 +44,9 @@ mime_guess = "2.0" num_cpus = "1.13" once_cell = "1.16" openmetrics-parser = "0.4" -opensrv-mysql = { git = "https://github.com/sunng87/opensrv", branch = "fix/buffer-overread" } +opensrv-mysql = "0.4" parking_lot = "0.12" -pgwire = "0.12" +pgwire = "0.13" pin-project = "1.0" postgres-types = { version = "0.2", features = ["with-chrono-0_4"] } promql-parser = "0.1.0" @@ -54,7 +54,7 @@ prost.workspace = true query = { path = "../query" } rand.workspace = true regex = "1.6" -rustls = "0.20" +rustls = "0.21" rustls-pemfile = "1.0" rust-embed = { version = "6.6", features = ["debug-embed"] } schemars = "0.8" @@ -67,7 +67,7 @@ snap = "1" sql = { path = "../sql" } strum = { version = "0.24", features = ["derive"] } table = { path = "../table" } -tokio-rustls = "0.23" +tokio-rustls = "0.24" tokio-stream = { version = "0.1", features = ["net"] } tokio.workspace = true tonic.workspace = true @@ -81,13 +81,14 @@ axum-test-helper = { git 
= "https://github.com/sunng87/axum-test-helper.git", br client = { path = "../client" } common-base = { path = "../common/base" } common-test-util = { path = "../common/test-util" } -mysql_async = { version = "0.31", default-features = false, features = [ +mysql_async = { version = "0.32", default-features = false, features = [ "default-rustls", ] } rand.workspace = true +rustls = { version = "0.21", features = ["dangerous_configuration"] } script = { path = "../script", features = ["python"] } serde_json = "1.0" table = { path = "../table" } tokio-postgres = "0.7" -tokio-postgres-rustls = "0.9" +tokio-postgres-rustls = { git = "https://github.com/sunng87/tokio-postgres-rustls.git", branch = "patch-1" } tokio-test = "0.4"
chore
update pgwire and rustls libraries (#1380)
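Most of the diff above is lockfile churn from moving the servers crate to rustls 0.21 and tokio-rustls 0.24. As a rough illustration of what that pairing looks like at a call site, the sketch below builds a client-side `TlsConnector`; it is not the servers crate's actual TLS setup, it assumes rustls 0.21 with tokio-rustls 0.24, and the empty root store is only there to keep the example self-contained (a real client would install trusted roots, e.g. from `webpki-roots` or the OS store).

```rust
use std::sync::Arc;

use rustls::{ClientConfig, RootCertStore};
use tokio_rustls::TlsConnector;

/// Builds a TLS connector from a rustls 0.21 client config.
fn build_connector() -> TlsConnector {
    // Empty for the sketch only; trusted roots would normally be added here.
    let roots = RootCertStore::empty();
    let config = ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(roots)
        .with_no_client_auth();
    TlsConnector::from(Arc::new(config))
}

fn main() {
    // Constructing the connector opens no connection; it only checks that the
    // configuration types line up under the upgraded crates.
    let _connector = build_connector();
    println!("rustls 0.21 client config built");
}
```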
dc46e9687924b4d6f024514e0f11ece93246ae16
2023-11-14 10:46:13
Weny Xu
refactor: replace InactiveRegionManager with RegionLeaseKeeper (#2729)
false
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs index 5970868f1a4e..fa06558a9ba3 100644 --- a/src/common/meta/src/key.rs +++ b/src/common/meta/src/key.rs @@ -660,9 +660,7 @@ impl TableMetadataManager { .table_route_manager() .build_update_txn(table_id, &current_table_route_value, &new_table_route_value)?; - let txn = Txn::merge_all(vec![update_table_route_txn]); - - let r = self.kv_backend.txn(txn).await?; + let r = self.kv_backend.txn(update_table_route_txn).await?; // Checks whether metadata was already updated. if !r.succeeded { diff --git a/src/datanode/src/alive_keeper.rs b/src/datanode/src/alive_keeper.rs index 21282e9ff54b..d8b0b7c50d67 100644 --- a/src/datanode/src/alive_keeper.rs +++ b/src/datanode/src/alive_keeper.rs @@ -373,12 +373,16 @@ impl CountdownTask { countdown.set(tokio::time::sleep_until(first_deadline)); }, Some(CountdownCommand::Reset((role, deadline))) => { + // The first-time granted regions might be ignored because the `first_deadline` is larger than the `region_lease_timeout`. + // Therefore, we set writable at the outside. + // TODO(weny): Considers setting `first_deadline` to `region_lease_timeout`. + let _ = self.region_server.set_writable(self.region_id, role.writable()); + if countdown.deadline() < deadline { trace!( "Reset deadline of region {region_id} to approximately {} seconds later", (deadline - Instant::now()).as_secs_f32(), ); - let _ = self.region_server.set_writable(self.region_id, role.writable()); countdown.set(tokio::time::sleep_until(deadline)); } // Else the countdown could be either: diff --git a/src/meta-srv/src/handler/node_stat.rs b/src/meta-srv/src/handler/node_stat.rs index 2295a42c9c6f..609e806296d6 100644 --- a/src/meta-srv/src/handler/node_stat.rs +++ b/src/meta-srv/src/handler/node_stat.rs @@ -18,6 +18,7 @@ use api::v1::meta::HeartbeatRequest; use common_time::util as time_util; use serde::{Deserialize, Serialize}; use store_api::region_engine::RegionRole; +use store_api::storage::RegionId; use crate::error::{Error, InvalidHeartbeatRequestSnafu}; use crate::keys::StatKey; @@ -72,8 +73,12 @@ impl Stat { } } - pub fn region_ids(&self) -> Vec<u64> { - self.region_stats.iter().map(|s| s.id).collect() + /// Returns a tuple array containing [RegionId] and [RegionRole]. + pub fn regions(&self) -> Vec<(RegionId, RegionRole)> { + self.region_stats + .iter() + .map(|s| (RegionId::from(s.id), s.role)) + .collect() } pub fn retain_active_region_stats(&mut self, inactive_region_ids: &HashSet<u64>) { diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs index 96808dcd82a5..24827ef40f17 100644 --- a/src/meta-srv/src/handler/region_lease_handler.rs +++ b/src/meta-srv/src/handler/region_lease_handler.rs @@ -12,22 +12,63 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use api::v1::meta::{GrantedRegion, HeartbeatRequest, RegionLease, RegionRole, Role}; +use std::collections::HashSet; +use std::sync::Arc; + +use api::v1::meta::{HeartbeatRequest, RegionLease, Role}; use async_trait::async_trait; +use common_meta::key::TableMetadataManagerRef; +use store_api::region_engine::{GrantedRegion, RegionRole}; +use store_api::storage::RegionId; use crate::error::Result; use crate::handler::{HeartbeatAccumulator, HeartbeatHandler}; -use crate::inactive_region_manager::InactiveRegionManager; use crate::metasrv::Context; +use crate::region::lease_keeper::RegionLeaseKeeperRef; +use crate::region::RegionLeaseKeeper; pub struct RegionLeaseHandler { region_lease_seconds: u64, + region_lease_keeper: RegionLeaseKeeperRef, } impl RegionLeaseHandler { - pub fn new(region_lease_seconds: u64) -> Self { + pub fn new(region_lease_seconds: u64, table_metadata_manager: TableMetadataManagerRef) -> Self { + let region_lease_keeper = RegionLeaseKeeper::new(table_metadata_manager); + Self { region_lease_seconds, + region_lease_keeper: Arc::new(region_lease_keeper), + } + } +} + +fn flip_role(role: RegionRole) -> RegionRole { + match role { + RegionRole::Follower => RegionRole::Leader, + RegionRole::Leader => RegionRole::Follower, + } +} + +/// Grants lease of regions. +/// +/// - If a region is in an `operable` set, it will be granted an `flip_role(current)`([RegionRole]); +/// otherwise, it will be granted a `current`([RegionRole]). +/// - If a region is in a `closable` set, it won't be granted. +fn grant( + granted_regions: &mut Vec<GrantedRegion>, + operable: &HashSet<RegionId>, + closable: &HashSet<RegionId>, + regions: &[RegionId], + current: RegionRole, +) { + for region in regions { + if operable.contains(region) { + granted_regions.push(GrantedRegion::new(*region, flip_role(current))); + } else if closable.contains(region) { + // Filters out the closable regions. 
+ } else { + granted_regions.push(GrantedRegion::new(*region, current)) } } } @@ -41,31 +82,61 @@ impl HeartbeatHandler for RegionLeaseHandler { async fn handle( &self, req: &HeartbeatRequest, - ctx: &mut Context, + _ctx: &mut Context, acc: &mut HeartbeatAccumulator, ) -> Result<()> { let Some(stat) = acc.stat.as_ref() else { return Ok(()); }; - let mut region_ids = stat.region_ids(); + let regions = stat.regions(); + let cluster_id = stat.cluster_id; + let datanode_id = stat.id; + let mut granted_regions = Vec::with_capacity(regions.len()); - let inactive_region_manager = InactiveRegionManager::new(&ctx.in_memory); - let inactive_region_ids = inactive_region_manager - .retain_active_regions(stat.cluster_id, stat.id, &mut region_ids) - .await?; - - let regions = region_ids + let (leaders, followers): (Vec<_>, Vec<_>) = regions .into_iter() - .map(|region_id| GrantedRegion { - region_id, - role: RegionRole::Leader.into(), + .map(|(id, role)| match role { + RegionRole::Follower => (None, Some(id)), + RegionRole::Leader => (Some(id), None), }) - .collect(); + .unzip(); + + let leaders = leaders.into_iter().flatten().collect::<Vec<_>>(); + + let (downgradable, closable) = self + .region_lease_keeper + .find_staled_leader_regions(cluster_id, datanode_id, &leaders) + .await?; + + grant( + &mut granted_regions, + &downgradable, + &closable, + &leaders, + RegionRole::Leader, + ); + + let followers = followers.into_iter().flatten().collect::<Vec<_>>(); + + let (upgradeable, closable) = self + .region_lease_keeper + .find_staled_follower_regions(cluster_id, datanode_id, &followers) + .await?; + + grant( + &mut granted_regions, + &upgradeable, + &closable, + &followers, + RegionRole::Follower, + ); - acc.inactive_region_ids = inactive_region_ids; acc.region_lease = Some(RegionLease { - regions, + regions: granted_regions + .into_iter() + .map(Into::into) + .collect::<Vec<_>>(), duration_since_epoch: req.duration_since_epoch, lease_seconds: self.region_lease_seconds, }); @@ -76,101 +147,215 @@ impl HeartbeatHandler for RegionLeaseHandler { #[cfg(test)] mod test { + use std::collections::HashMap; use std::sync::Arc; - use api::v1::meta::RegionRole; + use common_meta::distributed_time_constants; + use common_meta::key::test_utils::new_test_table_info; use common_meta::key::TableMetadataManager; - use common_meta::{distributed_time_constants, RegionIdent}; - use store_api::storage::{RegionId, RegionNumber}; + use common_meta::kv_backend::memory::MemoryKvBackend; + use common_meta::peer::Peer; + use common_meta::rpc::router::{Region, RegionRoute, RegionStatus}; + use store_api::storage::RegionId; use super::*; use crate::handler::node_stat::{RegionStat, Stat}; use crate::metasrv::builder::MetaSrvBuilder; - use crate::test_util; + + fn new_test_keeper() -> RegionLeaseKeeper { + let store = Arc::new(MemoryKvBackend::new()); + + let table_metadata_manager = Arc::new(TableMetadataManager::new(store)); + + RegionLeaseKeeper::new(table_metadata_manager) + } + + fn new_empty_region_stat(region_id: RegionId, role: RegionRole) -> RegionStat { + RegionStat { + id: region_id.as_u64(), + role, + rcus: 0, + wcus: 0, + approximate_bytes: 0, + approximate_rows: 0, + engine: String::new(), + } + } #[tokio::test] - async fn test_handle_region_lease() { - let region_failover_manager = test_util::create_region_failover_manager(); - let kv_backend = region_failover_manager - .create_context() - .selector_ctx - .kv_backend - .clone(); - - let table_id = 1; - let table_name = "my_table"; - let table_metadata_manager = 
Arc::new(TableMetadataManager::new(kv_backend.clone())); - test_util::prepare_table_region_and_info_value(&table_metadata_manager, table_name).await; + async fn test_handle_upgradable_follower() { + let datanode_id = 1; + let region_number = 1u32; + let table_id = 10; + let region_id = RegionId::new(table_id, region_number); + let another_region_id = RegionId::new(table_id, region_number + 1); + let peer = Peer::empty(datanode_id); + let follower_peer = Peer::empty(datanode_id + 1); + let table_info = new_test_table_info(table_id, vec![region_number]).into(); + let cluster_id = 1; - let req = HeartbeatRequest { - duration_since_epoch: 1234, + let region_routes = vec![RegionRoute { + region: Region::new_test(region_id), + leader_peer: Some(peer.clone()), + follower_peers: vec![follower_peer.clone()], ..Default::default() - }; + }]; + + let keeper = new_test_keeper(); + let table_metadata_manager = keeper.table_metadata_manager(); + + table_metadata_manager + .create_table_metadata(table_info, region_routes) + .await + .unwrap(); let builder = MetaSrvBuilder::new(); let metasrv = builder.build().await.unwrap(); let ctx = &mut metasrv.new_ctx(); let acc = &mut HeartbeatAccumulator::default(); - let new_region_stat = |region_number: RegionNumber| -> RegionStat { - let region_id = RegionId::new(table_id, region_number); - RegionStat { - id: region_id.as_u64(), - rcus: 0, - wcus: 0, - approximate_bytes: 0, - approximate_rows: 0, - engine: String::new(), - role: RegionRole::Leader.into(), - } + + acc.stat = Some(Stat { + cluster_id, + id: peer.id, + region_stats: vec![ + new_empty_region_stat(region_id, RegionRole::Follower), + new_empty_region_stat(another_region_id, RegionRole::Follower), + ], + ..Default::default() + }); + + let req = HeartbeatRequest { + duration_since_epoch: 1234, + ..Default::default() }; + + let handler = RegionLeaseHandler::new( + distributed_time_constants::REGION_LEASE_SECS, + table_metadata_manager.clone(), + ); + + handler.handle(&req, ctx, acc).await.unwrap(); + + assert_region_lease(acc, vec![GrantedRegion::new(region_id, RegionRole::Leader)]); + + let acc = &mut HeartbeatAccumulator::default(); + acc.stat = Some(Stat { - cluster_id: 1, - id: 1, - region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)], + cluster_id, + id: follower_peer.id, + region_stats: vec![ + new_empty_region_stat(region_id, RegionRole::Follower), + new_empty_region_stat(another_region_id, RegionRole::Follower), + ], ..Default::default() }); - let inactive_region_manager = InactiveRegionManager::new(&ctx.in_memory); - inactive_region_manager - .register_inactive_region(&RegionIdent { - cluster_id: 1, - datanode_id: 1, - table_id: 1, - region_number: 1, - engine: "mito2".to_string(), - }) - .await - .unwrap(); - inactive_region_manager - .register_inactive_region(&RegionIdent { - cluster_id: 1, - datanode_id: 1, - table_id: 1, - region_number: 3, - engine: "mito2".to_string(), - }) - .await - .unwrap(); + handler.handle(&req, ctx, acc).await.unwrap(); + + assert_eq!( + acc.region_lease.as_ref().unwrap().lease_seconds, + distributed_time_constants::REGION_LEASE_SECS + ); - RegionLeaseHandler::new(distributed_time_constants::REGION_LEASE_SECS) - .handle(&req, ctx, acc) + assert_region_lease( + acc, + vec![GrantedRegion::new(region_id, RegionRole::Follower)], + ); + } + + #[tokio::test] + + async fn test_handle_downgradable_leader() { + let datanode_id = 1; + let region_number = 1u32; + let table_id = 10; + let region_id = RegionId::new(table_id, region_number); + let 
another_region_id = RegionId::new(table_id, region_number + 1); + let no_exist_region_id = RegionId::new(table_id, region_number + 2); + let peer = Peer::empty(datanode_id); + let follower_peer = Peer::empty(datanode_id + 1); + let table_info = new_test_table_info(table_id, vec![region_number]).into(); + let cluster_id = 1; + + let region_routes = vec![ + RegionRoute { + region: Region::new_test(region_id), + leader_peer: Some(peer.clone()), + follower_peers: vec![follower_peer.clone()], + leader_status: Some(RegionStatus::Downgraded), + }, + RegionRoute { + region: Region::new_test(another_region_id), + leader_peer: Some(peer.clone()), + ..Default::default() + }, + ]; + + let keeper = new_test_keeper(); + let table_metadata_manager = keeper.table_metadata_manager(); + + table_metadata_manager + .create_table_metadata(table_info, region_routes) .await .unwrap(); - assert!(acc.region_lease.is_some()); - let lease = acc.region_lease.as_ref().unwrap(); - assert_eq!( - lease.regions, - vec![GrantedRegion { - region_id: RegionId::new(table_id, 2).as_u64(), - role: RegionRole::Leader.into() - }] + let builder = MetaSrvBuilder::new(); + let metasrv = builder.build().await.unwrap(); + let ctx = &mut metasrv.new_ctx(); + + let req = HeartbeatRequest { + duration_since_epoch: 1234, + ..Default::default() + }; + + let acc = &mut HeartbeatAccumulator::default(); + + acc.stat = Some(Stat { + cluster_id, + id: peer.id, + region_stats: vec![ + new_empty_region_stat(region_id, RegionRole::Leader), + new_empty_region_stat(another_region_id, RegionRole::Leader), + new_empty_region_stat(no_exist_region_id, RegionRole::Leader), + ], + ..Default::default() + }); + + let handler = RegionLeaseHandler::new( + distributed_time_constants::REGION_LEASE_SECS, + table_metadata_manager.clone(), ); - assert_eq!(lease.duration_since_epoch, 1234); - assert_eq!( - lease.lease_seconds, - distributed_time_constants::REGION_LEASE_SECS + + handler.handle(&req, ctx, acc).await.unwrap(); + + assert_region_lease( + acc, + vec![ + GrantedRegion::new(region_id, RegionRole::Follower), + GrantedRegion::new(another_region_id, RegionRole::Leader), + ], ); } + + fn assert_region_lease(acc: &HeartbeatAccumulator, expected: Vec<GrantedRegion>) { + let region_lease = acc.region_lease.as_ref().unwrap().clone(); + let granted: Vec<GrantedRegion> = region_lease + .regions + .into_iter() + .map(Into::into) + .collect::<Vec<_>>(); + + let granted = granted + .into_iter() + .map(|region| (region.region_id, region)) + .collect::<HashMap<_, _>>(); + + let expected = expected + .into_iter() + .map(|region| (region.region_id, region)) + .collect::<HashMap<_, _>>(); + + assert_eq!(granted, expected); + } } diff --git a/src/meta-srv/src/inactive_region_manager.rs b/src/meta-srv/src/inactive_region_manager.rs deleted file mode 100644 index 273aad844b4d..000000000000 --- a/src/meta-srv/src/inactive_region_manager.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::collections::HashSet; - -use common_meta::kv_backend::ResettableKvBackendRef; -use common_meta::rpc::store::{BatchGetRequest, DeleteRangeRequest, PutRequest, RangeRequest}; -use common_meta::RegionIdent; -use snafu::ResultExt; - -use crate::error::{self, Result}; -use crate::keys::InactiveRegionKey; -use crate::metrics::METRIC_META_INACTIVE_REGIONS; - -pub struct InactiveRegionManager<'a> { - store: &'a ResettableKvBackendRef, -} - -impl<'a> InactiveRegionManager<'a> { - pub fn new(store: &'a ResettableKvBackendRef) -> Self { - Self { store } - } - - pub async fn register_inactive_region(&self, region_ident: &RegionIdent) -> Result<()> { - let region_id = region_ident.get_region_id().as_u64(); - let key = InactiveRegionKey { - cluster_id: region_ident.cluster_id, - node_id: region_ident.datanode_id, - region_id, - }; - let req = PutRequest { - key: key.into(), - value: vec![], - prev_kv: false, - }; - self.store.put(req).await.context(error::KvBackendSnafu)?; - - METRIC_META_INACTIVE_REGIONS.inc(); - - Ok(()) - } - - pub async fn deregister_inactive_region(&self, region_ident: &RegionIdent) -> Result<()> { - let region_id = region_ident.get_region_id().as_u64(); - let key: Vec<u8> = InactiveRegionKey { - cluster_id: region_ident.cluster_id, - node_id: region_ident.datanode_id, - region_id, - } - .into(); - self.store - .delete(&key, false) - .await - .context(error::KvBackendSnafu)?; - - METRIC_META_INACTIVE_REGIONS.dec(); - - Ok(()) - } - - /// The input is a list of regions on a specific node. If one or more regions have been - /// set to inactive state by metasrv, the corresponding regions will be removed(update the - /// `region_ids`), then returns the removed regions. - pub async fn retain_active_regions( - &self, - cluster_id: u64, - node_id: u64, - region_ids: &mut Vec<u64>, - ) -> Result<HashSet<u64>> { - let key_region_ids = region_ids - .iter() - .map(|region_id| { - ( - InactiveRegionKey { - cluster_id, - node_id, - region_id: *region_id, - } - .into(), - *region_id, - ) - }) - .collect::<Vec<(Vec<u8>, _)>>(); - let keys = key_region_ids.iter().map(|(key, _)| key.clone()).collect(); - let resp = self - .store - .batch_get(BatchGetRequest { keys }) - .await - .context(error::KvBackendSnafu)?; - let kvs = resp.kvs; - if kvs.is_empty() { - return Ok(HashSet::new()); - } - - let inactive_keys = kvs.into_iter().map(|kv| kv.key).collect::<HashSet<_>>(); - let (active_region_ids, inactive_region_ids): (Vec<Option<u64>>, Vec<Option<u64>>) = - key_region_ids - .into_iter() - .map(|(key, region_id)| { - let is_active = !inactive_keys.contains(&key); - if is_active { - (Some(region_id), None) - } else { - (None, Some(region_id)) - } - }) - .unzip(); - *region_ids = active_region_ids.into_iter().flatten().collect(); - - Ok(inactive_region_ids.into_iter().flatten().collect()) - } - - /// Scan all inactive regions in the cluster. - /// - /// When will these data appear? - /// Generally, it is because the corresponding Datanode is disconnected and - /// did not respond to the `Failover` scheduling instructions of metasrv. 
- pub async fn scan_all_inactive_regions( - &self, - cluster_id: u64, - ) -> Result<Vec<InactiveRegionKey>> { - let prefix = InactiveRegionKey::get_prefix_by_cluster(cluster_id); - let request = RangeRequest::new().with_prefix(prefix); - let resp = self - .store - .range(request) - .await - .context(error::KvBackendSnafu)?; - let kvs = resp.kvs; - kvs.into_iter() - .map(|kv| InactiveRegionKey::try_from(kv.key)) - .collect::<Result<Vec<_>>>() - } - - pub async fn clear_all_inactive_regions(&self, cluster_id: u64) -> Result<()> { - let prefix = InactiveRegionKey::get_prefix_by_cluster(cluster_id); - let request = DeleteRangeRequest::new().with_prefix(prefix); - let _ = self - .store - .delete_range(request) - .await - .context(error::KvBackendSnafu)?; - Ok(()) - } -} diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs index 14af05f8c0c5..b30c6779b36a 100644 --- a/src/meta-srv/src/lib.rs +++ b/src/meta-srv/src/lib.rs @@ -40,8 +40,6 @@ pub mod table_meta_alloc; pub use crate::error::Result; -mod inactive_region_manager; - mod greptimedb_telemetry; #[cfg(test)] diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs index 8ad55b799918..12550f34f60e 100644 --- a/src/meta-srv/src/metasrv/builder.rs +++ b/src/meta-srv/src/metasrv/builder.rs @@ -168,7 +168,6 @@ impl MetaSrvBuilder { state.clone(), kv_backend.clone(), )); - let kv_backend = leader_cached_kv_backend.clone() as _; let meta_peer_client = meta_peer_client .unwrap_or_else(|| build_default_meta_peer_client(&election, &in_memory)); @@ -177,7 +176,9 @@ impl MetaSrvBuilder { let mailbox = build_mailbox(&kv_backend, &pushers); let procedure_manager = build_procedure_manager(&options, &kv_backend); let table_id_sequence = Arc::new(Sequence::new(TABLE_ID_SEQ, 1024, 10, kv_backend.clone())); - let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone())); + let table_metadata_manager = Arc::new(TableMetadataManager::new( + leader_cached_kv_backend.clone() as _, + )); let lock = lock.unwrap_or_else(|| Arc::new(MemLock::default())); let selector_ctx = SelectorContext { server_addr: options.server_addr.clone(), @@ -227,8 +228,10 @@ impl MetaSrvBuilder { .and_then(|plugins| plugins.get::<PublishRef>()) .map(|publish| PublishHeartbeatHandler::new(publish.clone())); - let region_lease_handler = - RegionLeaseHandler::new(distributed_time_constants::REGION_LEASE_SECS); + let region_lease_handler = RegionLeaseHandler::new( + distributed_time_constants::REGION_LEASE_SECS, + table_metadata_manager.clone(), + ); let group = HeartbeatHandlerGroup::new(pushers); group.add_handler(ResponseHeaderHandler).await; diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs index a09bb1c2c0c4..fde254ea7084 100644 --- a/src/meta-srv/src/procedure/region_failover.rs +++ b/src/meta-srv/src/procedure/region_failover.rs @@ -270,8 +270,6 @@ trait State: Sync + Send + Debug { fn status(&self) -> Status { Status::executing(true) } - - fn remark_inactive_region_if_needed(&mut self) {} } /// The states transition of region failover procedure: @@ -341,11 +339,7 @@ impl RegionFailoverProcedure { } fn from_json(json: &str, context: RegionFailoverContext) -> ProcedureResult<Self> { - let mut node: Node = serde_json::from_str(json).context(FromJsonSnafu)?; - // If the meta leader node dies during the execution of the procedure, - // the new leader node needs to remark the failed region as "inactive" - // to prevent it from renewing the lease. 
- node.state.remark_inactive_region_if_needed(); + let node: Node = serde_json::from_str(json).context(FromJsonSnafu)?; Ok(Self { node, context }) } } diff --git a/src/meta-srv/src/procedure/region_failover/activate_region.rs b/src/meta-srv/src/procedure/region_failover/activate_region.rs index 69dc51334358..b758524018b9 100644 --- a/src/meta-srv/src/procedure/region_failover/activate_region.rs +++ b/src/meta-srv/src/procedure/region_failover/activate_region.rs @@ -31,7 +31,6 @@ use crate::error::{ self, Error, Result, RetryLaterSnafu, SerializeToJsonSnafu, UnexpectedInstructionReplySnafu, }; use crate::handler::HeartbeatMailbox; -use crate::inactive_region_manager::InactiveRegionManager; use crate::procedure::region_failover::OPEN_REGION_MESSAGE_TIMEOUT; use crate::service::mailbox::{Channel, MailboxReceiver}; @@ -104,17 +103,6 @@ impl ActivateRegion { input: instruction.to_string(), })?; - // Ensure that metasrv will renew the lease for this candidate node. - // - // This operation may not be redundant, imagine the following scenario: - // This candidate once had the current region, and because it did not respond to the `close` - // command in time, it was considered an inactive node by metasrv, then it replied, and the - // current region failed over again, and the node was selected as a candidate, so it needs - // to clear its previous state first. - InactiveRegionManager::new(&ctx.in_memory) - .deregister_inactive_region(&candidate_ident) - .await?; - let ch = Channel::Datanode(self.candidate.id); ctx.mailbox.send(&ch, msg, timeout).await } @@ -182,23 +170,12 @@ impl State for ActivateRegion { ctx: &RegionFailoverContext, failed_region: &RegionIdent, ) -> Result<Box<dyn State>> { - if self.remark_inactive_region { - // Remark the fail region as inactive to prevent it from renewing the lease. - InactiveRegionManager::new(&ctx.in_memory) - .register_inactive_region(failed_region) - .await?; - } - let mailbox_receiver = self .send_open_region_message(ctx, failed_region, OPEN_REGION_MESSAGE_TIMEOUT) .await?; self.handle_response(mailbox_receiver, failed_region).await } - - fn remark_inactive_region_if_needed(&mut self) { - self.remark_inactive_region = true; - } } #[cfg(test)] diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs index d24ae9f68b8c..04b3ccde97e4 100644 --- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs +++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs @@ -30,7 +30,6 @@ use crate::error::{ self, Error, Result, RetryLaterSnafu, SerializeToJsonSnafu, UnexpectedInstructionReplySnafu, }; use crate::handler::HeartbeatMailbox; -use crate::inactive_region_manager::InactiveRegionManager; use crate::service::mailbox::{Channel, MailboxReceiver}; #[derive(Serialize, Deserialize, Debug)] @@ -91,22 +90,13 @@ impl DeactivateRegion { })?; let ch = Channel::Datanode(failed_region.datanode_id); - // Mark the region as inactive - InactiveRegionManager::new(&ctx.in_memory) - .register_inactive_region(failed_region) - .await?; - // We first marked the region as inactive, which means that the failed region cannot - // be successfully renewed from now on, so after the lease time is exceeded, the region - // will be automatically closed. - // If the deadline is exceeded, we can proceed to the next step with confidence, - // as the expiration means that the region has been closed. 
let timeout = Duration::from_secs(ctx.region_lease_secs); ctx.mailbox.send(&ch, msg, timeout).await } async fn handle_response( &self, - ctx: &RegionFailoverContext, + _ctx: &RegionFailoverContext, mailbox_receiver: MailboxReceiver, failed_region: &RegionIdent, ) -> Result<Box<dyn State>> { @@ -123,10 +113,6 @@ impl DeactivateRegion { .fail(); }; if result { - InactiveRegionManager::new(&ctx.in_memory) - .deregister_inactive_region(failed_region) - .await?; - Ok(Box::new(ActivateRegion::new(self.candidate.clone()))) } else { // Under rare circumstances would a Datanode fail to close a Region. diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs index a8d7c8eeca12..76f5f57dd866 100644 --- a/src/meta-srv/src/region/lease_keeper.rs +++ b/src/meta-srv/src/region/lease_keeper.rs @@ -16,6 +16,7 @@ pub mod mito; pub mod utils; use std::collections::{HashMap, HashSet}; +use std::sync::Arc; use common_meta::key::table_route::TableRouteValue; use common_meta::key::TableMetadataManagerRef; @@ -26,6 +27,8 @@ use self::mito::find_staled_leader_regions; use crate::error::{self, Result}; use crate::region::lease_keeper::utils::find_staled_follower_regions; +pub type RegionLeaseKeeperRef = Arc<RegionLeaseKeeper>; + pub struct RegionLeaseKeeper { table_metadata_manager: TableMetadataManagerRef, } diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs index c7eac3af4231..a5867e376924 100644 --- a/src/meta-srv/src/service/admin.rs +++ b/src/meta-srv/src/service/admin.rs @@ -14,7 +14,6 @@ mod health; mod heartbeat; -mod inactive_regions; mod leader; mod meta; mod node_lease; @@ -91,20 +90,6 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin { .route("/route", handler.clone()) .route("/route/help", handler); - let router = router.route( - "/inactive-regions/view", - inactive_regions::ViewInactiveRegionsHandler { - store: meta_srv.in_memory().clone(), - }, - ); - - let router = router.route( - "/inactive-regions/clear", - inactive_regions::ClearInactiveRegionsHandler { - store: meta_srv.in_memory().clone(), - }, - ); - let router = Router::nest("/admin", router); Admin::new(router) diff --git a/src/meta-srv/src/service/admin/inactive_regions.rs b/src/meta-srv/src/service/admin/inactive_regions.rs deleted file mode 100644 index 6c3c4184903e..000000000000 --- a/src/meta-srv/src/service/admin/inactive_regions.rs +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::collections::HashMap; - -use common_meta::kv_backend::ResettableKvBackendRef; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; -use tonic::codegen::http; - -use crate::error::{self, Result}; -use crate::inactive_region_manager::InactiveRegionManager; -use crate::keys::InactiveRegionKey; -use crate::service::admin::{util, HttpHandler}; - -pub struct ViewInactiveRegionsHandler { - pub store: ResettableKvBackendRef, -} - -#[async_trait::async_trait] -impl HttpHandler for ViewInactiveRegionsHandler { - async fn handle( - &self, - _: &str, - params: &HashMap<String, String>, - ) -> Result<http::Response<String>> { - let cluster_id = util::extract_cluster_id(params)?; - - let inactive_region_manager = InactiveRegionManager::new(&self.store); - let inactive_regions = inactive_region_manager - .scan_all_inactive_regions(cluster_id) - .await?; - let result = InactiveRegions { inactive_regions }.try_into()?; - - http::Response::builder() - .status(http::StatusCode::OK) - .body(result) - .context(error::InvalidHttpBodySnafu) - } -} - -pub struct ClearInactiveRegionsHandler { - pub store: ResettableKvBackendRef, -} - -#[async_trait::async_trait] -impl HttpHandler for ClearInactiveRegionsHandler { - async fn handle( - &self, - _: &str, - params: &HashMap<String, String>, - ) -> Result<http::Response<String>> { - let cluster_id = util::extract_cluster_id(params)?; - - let inactive_region_manager = InactiveRegionManager::new(&self.store); - inactive_region_manager - .clear_all_inactive_regions(cluster_id) - .await?; - - Ok(http::Response::builder() - .status(http::StatusCode::OK) - .body("Success\n".to_owned()) - .unwrap()) - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(transparent)] -struct InactiveRegions { - inactive_regions: Vec<InactiveRegionKey>, -} - -impl TryFrom<InactiveRegions> for String { - type Error = error::Error; - - fn try_from(value: InactiveRegions) -> Result<Self> { - serde_json::to_string(&value).context(error::SerializeToJsonSnafu { - input: format!("{value:?}"), - }) - } -} diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs index bac3df5bf458..f300931bb109 100644 --- a/src/store-api/src/region_engine.rs +++ b/src/store-api/src/region_engine.rs @@ -16,7 +16,7 @@ use std::sync::Arc; -use api::greptime_proto::v1::meta::RegionRole as PbRegionRole; +use api::greptime_proto::v1::meta::{GrantedRegion as PbGrantedRegion, RegionRole as PbRegionRole}; use async_trait::async_trait; use common_error::ext::BoxedError; use common_query::Output; @@ -27,6 +27,38 @@ use crate::metadata::RegionMetadataRef; use crate::region_request::RegionRequest; use crate::storage::{RegionId, ScanRequest}; +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct GrantedRegion { + pub region_id: RegionId, + pub region_role: RegionRole, +} +impl GrantedRegion { + pub fn new(region_id: RegionId, region_role: RegionRole) -> Self { + Self { + region_id, + region_role, + } + } +} + +impl From<GrantedRegion> for PbGrantedRegion { + fn from(value: GrantedRegion) -> Self { + PbGrantedRegion { + region_id: value.region_id.as_u64(), + role: PbRegionRole::from(value.region_role).into(), + } + } +} + +impl From<PbGrantedRegion> for GrantedRegion { + fn from(value: PbGrantedRegion) -> Self { + GrantedRegion { + region_id: RegionId::from_u64(value.region_id), + region_role: value.role().into(), + } + } +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum RegionRole { // Readonly region(mito2), Readonly region(file).
refactor
replace InactiveRegionManager with RegionLeaseKeeper (#2729)
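The commit above replaces the ad-hoc `InactiveRegionManager` with a `RegionLeaseKeeper` and, in `store-api`, introduces `GrantedRegion` with symmetric `From` conversions to and from the protobuf `GrantedRegion`. The snippet below is a minimal, self-contained sketch of that domain-type/wire-type conversion pattern; the `Region` and `PbRegion` structs are simplified stand-ins made up for illustration, not the actual `store-api` or `greptime_proto` definitions.

```rust
// Simplified stand-ins for a domain type and its protobuf counterpart.
// In the real diff these are `GrantedRegion` and `PbGrantedRegion`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Region {
    region_id: u64,
    is_leader: bool,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct PbRegion {
    region_id: u64,
    role: i32, // protobuf enums travel as i32 on the wire
}

impl From<Region> for PbRegion {
    fn from(value: Region) -> Self {
        PbRegion {
            region_id: value.region_id,
            role: if value.is_leader { 1 } else { 0 },
        }
    }
}

impl From<PbRegion> for Region {
    fn from(value: PbRegion) -> Self {
        Region {
            region_id: value.region_id,
            is_leader: value.role == 1,
        }
    }
}

fn main() {
    let granted = Region { region_id: 42, is_leader: true };
    // The two `From` impls make the round trip lossless.
    let round_tripped = Region::from(PbRegion::from(granted));
    assert_eq!(granted, round_tripped);
}
```

Keeping both directions side by side makes the lossless round trip easy to assert in a unit test, which is the property such wire conversions need.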
49004391d348af785aa910691e48d2037b621ffa
2024-09-19 15:10:10
Weny Xu
chore(fuzz): print table name for debugging (#4738)
false
diff --git a/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs b/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs index 5eaf43ab0a8e..147c3ead1e41 100644 --- a/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs +++ b/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs @@ -257,7 +257,12 @@ async fn execute_failover(ctx: FuzzContext, input: FuzzInput) -> Result<()> { for (table_ctx, insert_expr) in tables.values() { let sql = format!("select count(1) as count from {}", table_ctx.name); let values = count_values(&ctx.greptime, &sql).await?; - assert_eq!(values.count as usize, insert_expr.values_list.len()); + let expected_rows = insert_expr.values_list.len() as u64; + assert_eq!( + values.count as u64, expected_rows, + "Expected rows: {}, got: {}, table: {}", + expected_rows, values.count, table_ctx.name + ); } // Clean up diff --git a/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs b/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs index 5fab5f260333..f456550b3fb9 100644 --- a/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs +++ b/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs @@ -323,7 +323,11 @@ async fn execute_failover(ctx: FuzzContext, input: FuzzInput) -> Result<()> { for (table_ctx, expected_rows) in table_ctxs.iter().zip(affected_rows) { let sql = format!("select count(1) as count from {}", table_ctx.name); let values = count_values(&ctx.greptime, &sql).await?; - assert_eq!(values.count as u64, expected_rows); + assert_eq!( + values.count as u64, expected_rows, + "Expected rows: {}, got: {}, table: {}", + expected_rows, values.count, table_ctx.name + ); } for table_ctx in table_ctxs {
chore
print table name for debugging (#4738)
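The change above is small but worth spelling out: when one fuzz target validates many tables in a loop, the assertion message has to say which table diverged, otherwise the panic is uninformative. A minimal sketch of the same pattern follows; the helper name is made up for illustration.

```rust
/// Asserts the observed row count, carrying the table name in the panic
/// message so a failing fuzz run immediately identifies the table.
fn assert_row_count(table_name: &str, got: u64, expected: u64) {
    assert_eq!(
        got, expected,
        "Expected rows: {}, got: {}, table: {}",
        expected, got, table_name
    );
}

fn main() {
    assert_row_count("fuzz_table_0", 128, 128); // passes
    // assert_row_count("fuzz_table_1", 127, 128);
    // ^ would panic with a message containing:
    //   "Expected rows: 128, got: 127, table: fuzz_table_1"
}
```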
e8d2e823357ddbc44b942f4c81b93778f4fd0d98
2023-03-14 18:48:43
Ruihang Xia
fix: ambiguous column reference (#1177)
false
diff --git a/Cargo.lock b/Cargo.lock index 09b1856d3ac2..4e07c2a9ee7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2107,8 +2107,8 @@ dependencies = [ [[package]] name = "datafusion" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "ahash 0.8.3", "arrow", @@ -2137,7 +2137,6 @@ dependencies = [ "object_store", "parking_lot", "parquet", - "paste", "percent-encoding", "pin-project-lite", "rand", @@ -2155,8 +2154,8 @@ dependencies = [ [[package]] name = "datafusion-common" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "arrow", "chrono", @@ -2168,8 +2167,8 @@ dependencies = [ [[package]] name = "datafusion-execution" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "dashmap", "datafusion-common", @@ -2185,20 +2184,19 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "ahash 0.8.3", "arrow", "datafusion-common", - "log", "sqlparser", ] [[package]] name = "datafusion-optimizer" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "arrow", "async-trait", @@ -2214,8 +2212,8 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "ahash 0.8.3", "arrow", @@ -2233,7 +2231,6 @@ dependencies = [ "itertools", "lazy_static", "md-5", - "num-traits", "paste", "petgraph", "rand", @@ -2245,8 +2242,8 @@ dependencies = [ [[package]] name = "datafusion-row" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = 
"git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "arrow", "datafusion-common", @@ -2256,8 +2253,8 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "19.0.0" -source = "git+https://github.com/MichaelScofield/arrow-datafusion.git?rev=d7b3c730049f2561755f9d855f638cb580c38eff#d7b3c730049f2561755f9d855f638cb580c38eff" +version = "20.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=146a949218ec970784974137277cde3b4e547d0a#146a949218ec970784974137277cde3b4e547d0a" dependencies = [ "arrow-schema", "datafusion-common", diff --git a/Cargo.toml b/Cargo.toml index fdb5366910cd..ab0d3136f03e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,13 +57,12 @@ arrow-schema = { version = "34.0", features = ["serde"] } async-stream = "0.3" async-trait = "0.1" chrono = { version = "0.4", features = ["serde"] } -# TODO(LFC): Use official DataFusion, when https://github.com/apache/arrow-datafusion/pull/5542 got merged -datafusion = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" } -datafusion-common = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" } -datafusion-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" } -datafusion-optimizer = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" } -datafusion-physical-expr = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" } -datafusion-sql = { git = "https://github.com/MichaelScofield/arrow-datafusion.git", rev = "d7b3c730049f2561755f9d855f638cb580c38eff" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" } +datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" } +datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" } +datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" } +datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" } +datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "146a949218ec970784974137277cde3b4e547d0a" } futures = "0.3" futures-util = "0.3" parquet = "34.0" diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs index a89c9abb4d1d..c64f0d53a3a8 100644 --- a/src/promql/src/planner.rs +++ b/src/promql/src/planner.rs @@ -957,7 +957,12 @@ impl PromPlanner { .tag_columns .iter() .chain(self.ctx.time_index_column.iter()) - .map(|col| Ok(DfExpr::Column(Column::from(col)))); + .map(|col| { + Ok(DfExpr::Column(Column::new( + self.ctx.table_name.clone(), + col, + ))) + }); // build computation exprs let result_value_columns = self @@ -1485,7 +1490,7 @@ mod test { .unwrap(); let expected = String::from( - "Projection: lhs.tag_0, lhs.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\ + "Projection: some_metric.tag_0, 
some_metric.timestamp, some_metric.field_0 + some_metric.field_0 AS some_metric.field_0 + some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), some_metric.field_0 + some_metric.field_0:Float64;N]\ \n Inner Join: lhs.tag_0 = some_metric.tag_0, lhs.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs index fc986c7e20a2..06efe1f41038 100644 --- a/src/query/src/optimizer.rs +++ b/src/query/src/optimizer.rs @@ -34,6 +34,8 @@ use datatypes::arrow::datatypes::DataType; pub struct TypeConversionRule; impl OptimizerRule for TypeConversionRule { + // TODO(ruihang): fix this warning + #[allow(deprecated)] fn try_optimize( &self, plan: &LogicalPlan, diff --git a/tests/cases/standalone/common/optimizer/filter_push_down.result b/tests/cases/standalone/common/optimizer/filter_push_down.result index 369da7da84a2..118680dd3b02 100644 --- a/tests/cases/standalone/common/optimizer/filter_push_down.result +++ b/tests/cases/standalone/common/optimizer/filter_push_down.result @@ -121,7 +121,13 @@ SELECT * FROM integers WHERE i IN ((SELECT i FROM integers)) AND i<3 ORDER BY i; SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i IN ((SELECT i FROM integers)) AND i1.i=i2.i ORDER BY 1; -Error: 3000(PlanQuery), Error during planning: column reference i is ambiguous ++---+---+ +| i | i | ++---+---+ +| 1 | 1 | +| 2 | 2 | +| 3 | 3 | ++---+---+ SELECT * FROM integers i1 WHERE EXISTS(SELECT i FROM integers WHERE i=i1.i) ORDER BY i1.i;
fix
ambiguous column reference (#1177)
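The fix above makes the PromQL planner emit table-qualified column expressions (`Column::new(table_name, col)` instead of a bare `Column::from(col)`), so a self-join such as `integers i1, integers i2` no longer trips DataFusion's ambiguity check. The sketch below is a simplified model of that resolution rule, not DataFusion's actual resolver: a bare name that matches columns from two relations is ambiguous, while a qualified reference resolves uniquely.

```rust
/// A (relation, column) pair, loosely modelled on DataFusion's `Column`.
#[derive(Debug, Clone, PartialEq)]
struct ColumnRef {
    relation: Option<String>,
    name: String,
}

/// Resolves `wanted` against the columns visible to a plan node.
/// Unqualified references match any relation; qualified ones match only their own.
fn resolve(schema: &[ColumnRef], wanted: &ColumnRef) -> Result<ColumnRef, String> {
    let matches: Vec<ColumnRef> = schema
        .iter()
        .filter(|c| {
            c.name == wanted.name
                && match &wanted.relation {
                    Some(rel) => c.relation.as_deref() == Some(rel.as_str()),
                    None => true,
                }
        })
        .cloned()
        .collect();
    match matches.len() {
        0 => Err(format!("column {} not found", wanted.name)),
        1 => Ok(matches.into_iter().next().unwrap()),
        _ => Err(format!("column reference {} is ambiguous", wanted.name)),
    }
}

fn main() {
    // Two join inputs both expose a column `i`, as in `integers i1, integers i2`.
    let schema = vec![
        ColumnRef { relation: Some("i1".into()), name: "i".into() },
        ColumnRef { relation: Some("i2".into()), name: "i".into() },
    ];

    // A bare `i` is ambiguous ...
    let bare = ColumnRef { relation: None, name: "i".into() };
    assert!(resolve(&schema, &bare).is_err());

    // ... while `i1.i` resolves uniquely, which is what qualifying the
    // planner's output columns achieves.
    let qualified = ColumnRef { relation: Some("i1".into()), name: "i".into() };
    assert!(resolve(&schema, &qualified).is_ok());
}
```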
cc46194f294468b4c7f2f4b3ddfc92d75ff9a6d5
2023-01-19 10:43:33
zyy17
refactor: support TLS private key of RSA format and add the full test certificates generation (#885)
false
diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs index 906d685027ff..b0651f56c1f6 100644 --- a/src/servers/src/tls.rs +++ b/src/servers/src/tls.rs @@ -16,7 +16,7 @@ use std::fs::File; use std::io::{BufReader, Error, ErrorKind}; use rustls::{Certificate, PrivateKey, ServerConfig}; -use rustls_pemfile::{certs, pkcs8_private_keys}; +use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys}; use serde::{Deserialize, Serialize}; use strum::EnumString; @@ -80,11 +80,21 @@ impl TlsOption { .map_err(|_| Error::new(ErrorKind::InvalidInput, "invalid cert")) .map(|mut certs| certs.drain(..).map(Certificate).collect())?; - // TODO(SSebo): support more private key types - let key = pkcs8_private_keys(&mut BufReader::new(File::open(&self.key_path)?)) - .map_err(|_| Error::new(ErrorKind::InvalidInput, "invalid key")) - .map(|mut keys| keys.drain(..).map(PrivateKey).next())? - .ok_or_else(|| Error::new(ErrorKind::InvalidInput, "invalid key"))?; + let key = { + let mut pkcs8 = pkcs8_private_keys(&mut BufReader::new(File::open(&self.key_path)?)) + .map_err(|_| Error::new(ErrorKind::InvalidInput, "invalid key"))?; + if !pkcs8.is_empty() { + PrivateKey(pkcs8.remove(0)) + } else { + let mut rsa = rsa_private_keys(&mut BufReader::new(File::open(&self.key_path)?)) + .map_err(|_| Error::new(ErrorKind::InvalidInput, "invalid key"))?; + if !rsa.is_empty() { + PrivateKey(rsa.remove(0)) + } else { + return Err(Error::new(ErrorKind::InvalidInput, "invalid key")); + } + } + }; // TODO(SSebo): with_client_cert_verifier if TlsMode is Required. let config = ServerConfig::builder() diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs index 0ddb4261621e..9ec9f1caf760 100644 --- a/src/servers/tests/mysql/mysql_server_test.rs +++ b/src/servers/tests/mysql/mysql_server_test.rs @@ -183,49 +183,69 @@ async fn test_query_all_datatypes() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_server_prefer_secure_client_plain() -> Result<()> { - let server_tls = TlsOption { - mode: servers::tls::TlsMode::Prefer, - cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), - }; - - let client_tls = false; - do_test_query_all_datatypes(server_tls, client_tls).await?; + do_test_query_all_datatypes_with_secure_server(servers::tls::TlsMode::Prefer, false, false) + .await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_server_prefer_secure_client_secure() -> Result<()> { - let server_tls = TlsOption { - mode: servers::tls::TlsMode::Prefer, - cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), - }; - - let client_tls = true; - do_test_query_all_datatypes(server_tls, client_tls).await?; +async fn test_server_prefer_secure_client_plain_with_pkcs8_priv_key() -> Result<()> { + do_test_query_all_datatypes_with_secure_server(servers::tls::TlsMode::Prefer, false, true) + .await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn test_server_require_secure_client_secure() -> Result<()> { + do_test_query_all_datatypes_with_secure_server(servers::tls::TlsMode::Require, true, false) + .await?; + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_server_require_secure_client_secure_with_pkcs8_priv_key() -> Result<()> { + do_test_query_all_datatypes_with_secure_server(servers::tls::TlsMode::Require, true, true) + .await?; + Ok(()) +} + +#[tokio::test(flavor = 
"multi_thread", worker_threads = 2)] +async fn test_server_required_secure_client_plain() -> Result<()> { let server_tls = TlsOption { mode: servers::tls::TlsMode::Require, cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), + key_path: "tests/ssl/server-rsa.key".to_owned(), }; - let client_tls = true; - do_test_query_all_datatypes(server_tls, client_tls).await?; + let client_tls = false; + + #[allow(unused)] + let TestingData { + column_schemas, + mysql_columns_def, + columns, + mysql_text_output_rows, + } = all_datatype_testing_data(); + let schema = Arc::new(Schema::new(column_schemas.clone())); + let recordbatch = RecordBatch::new(schema, columns).unwrap(); + let table = MemTable::new("all_datatypes", recordbatch); + + let mysql_server = create_mysql_server(table, server_tls)?; + + let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap(); + let server_addr = mysql_server.start(listening).await.unwrap(); + + let r = create_connection(server_addr.port(), None, client_tls).await; + assert!(r.is_err()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_server_required_secure_client_plain() -> Result<()> { +async fn test_server_required_secure_client_plain_with_pkcs8_priv_key() -> Result<()> { let server_tls = TlsOption { mode: servers::tls::TlsMode::Require, cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), + key_path: "tests/ssl/server-pkcs8.key".to_owned(), }; let client_tls = false; @@ -393,3 +413,23 @@ async fn create_connection( mysql_async::Conn::new(opts).await } + +async fn do_test_query_all_datatypes_with_secure_server( + server_tls_mode: servers::tls::TlsMode, + client_tls: bool, + is_pkcs8_priv_key: bool, +) -> Result<()> { + let server_tls = TlsOption { + mode: server_tls_mode, + cert_path: "tests/ssl/server.crt".to_owned(), + key_path: { + if is_pkcs8_priv_key { + "tests/ssl/server-pkcs8.key".to_owned() + } else { + "tests/ssl/server-rsa.key".to_owned() + } + }, + }; + + do_test_query_all_datatypes(server_tls, client_tls).await +} diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs index de05fe66bfd0..871b08906753 100644 --- a/src/servers/tests/postgres/mod.rs +++ b/src/servers/tests/postgres/mod.rs @@ -235,15 +235,28 @@ async fn test_query_pg_concurrently() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_server_secure_prefer_client_plain() -> Result<()> { common_telemetry::init_default_ut_logging(); + do_simple_query_with_secure_server(servers::tls::TlsMode::Prefer, false, false).await?; + Ok(()) +} - let server_tls = TlsOption { - mode: servers::tls::TlsMode::Prefer, - cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), - }; +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_server_secure_prefer_client_plain_with_pkcs8_priv_key() -> Result<()> { + common_telemetry::init_default_ut_logging(); + do_simple_query_with_secure_server(servers::tls::TlsMode::Prefer, false, true).await?; + Ok(()) +} - let client_tls = false; - do_simple_query(server_tls, client_tls).await?; +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_server_secure_require_client_secure() -> Result<()> { + common_telemetry::init_default_ut_logging(); + do_simple_query_with_secure_server(servers::tls::TlsMode::Require, true, false).await?; + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
test_server_secure_require_client_secure_with_pkcs8_priv_key() -> Result<()> { + common_telemetry::init_default_ut_logging(); + do_simple_query_with_secure_server(servers::tls::TlsMode::Require, true, true).await?; Ok(()) } @@ -254,7 +267,7 @@ async fn test_server_secure_require_client_plain() -> Result<()> { let server_tls = TlsOption { mode: servers::tls::TlsMode::Require, cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), + key_path: "tests/ssl/server-rsa.key".to_owned(), }; let server_port = start_test_server(server_tls).await?; let r = create_plain_connection(server_port, false).await; @@ -263,17 +276,17 @@ async fn test_server_secure_require_client_plain() -> Result<()> { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_server_secure_require_client_secure() -> Result<()> { +async fn test_server_secure_require_client_plain_with_pkcs8_priv_key() -> Result<()> { common_telemetry::init_default_ut_logging(); let server_tls = TlsOption { mode: servers::tls::TlsMode::Require, cert_path: "tests/ssl/server.crt".to_owned(), - key_path: "tests/ssl/server.key".to_owned(), + key_path: "tests/ssl/server-pkcs8.key".to_owned(), }; - - let client_tls = true; - do_simple_query(server_tls, client_tls).await?; + let server_port = start_test_server(server_tls).await?; + let r = create_plain_connection(server_port, false).await; + assert!(r.is_err()); Ok(()) } @@ -434,3 +447,23 @@ impl ServerCertVerifier for AcceptAllVerifier { Ok(ServerCertVerified::assertion()) } } + +async fn do_simple_query_with_secure_server( + server_tls_mode: servers::tls::TlsMode, + client_tls: bool, + is_pkcs8_priv_key: bool, +) -> Result<()> { + let server_tls = TlsOption { + mode: server_tls_mode, + cert_path: "tests/ssl/server.crt".to_owned(), + key_path: { + if is_pkcs8_priv_key { + "tests/ssl/server-pkcs8.key".to_owned() + } else { + "tests/ssl/server-rsa.key".to_owned() + } + }, + }; + + do_simple_query(server_tls, client_tls).await +} diff --git a/src/servers/tests/ssl/cert.conf b/src/servers/tests/ssl/cert.conf new file mode 100644 index 000000000000..f2764fbe228a --- /dev/null +++ b/src/servers/tests/ssl/cert.conf @@ -0,0 +1,10 @@ +authorityKeyIdentifier=keyid,issuer +basicConstraints=CA:FALSE +keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment +subjectAltName = @alt_names + +[alt_names] +DNS.1 = *.greptime.com +DNS.2 = *.greptime.cloud +DNS.3 = localhost +IP.1 = 127.0.0.1 diff --git a/src/servers/tests/ssl/csr.conf b/src/servers/tests/ssl/csr.conf new file mode 100644 index 000000000000..d911e0270ee4 --- /dev/null +++ b/src/servers/tests/ssl/csr.conf @@ -0,0 +1,23 @@ +[ req ] +default_bits = 2048 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[ dn ] +C = CN +ST = Hangzhou +L = Hangzhou +O = Greptime +OU = Greptime Developer +CN = greptime.com + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = *.greptime.com +DNS.2 = *.greptime.cloud +DNS.3 = localhost +IP.1 = 127.0.0.1 diff --git a/src/servers/tests/ssl/gen-certs.sh b/src/servers/tests/ssl/gen-certs.sh new file mode 100755 index 000000000000..3e1479b48274 --- /dev/null +++ b/src/servers/tests/ssl/gen-certs.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Create the self-signed CA certificate. +openssl req -x509 \ + -sha256 -days 356 \ + -nodes \ + -newkey rsa:2048 \ + -subj "/CN=greptime-ca" \ + -keyout root-ca.key -out root-ca.crt + +# Create the server private key. 
+openssl genrsa -out server-rsa.key 2048 + +# Create the server certificate signing request. +openssl req -new -key server-rsa.key -out server.csr -config csr.conf + +# Create the server certificate. +openssl x509 -req \ + -in server.csr \ + -CA root-ca.crt -CAkey root-ca.key \ + -CAcreateserial -out server.crt \ + -days 365 \ + -sha256 -extfile cert.conf + +# Create private key of pkcs8 format from rsa key. +openssl pkcs8 -topk8 -inform PEM -in ./server-rsa.key -outform pem -nocrypt -out server-pkcs8.key diff --git a/src/servers/tests/ssl/root-ca.crt b/src/servers/tests/ssl/root-ca.crt new file mode 100644 index 000000000000..1d95f3a18e9b --- /dev/null +++ b/src/servers/tests/ssl/root-ca.crt @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICqDCCAZACCQC7+cxd19y8qjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtn +cmVwdGltZS1jYTAeFw0yMzAxMTYxMzQ5MzVaFw0yNDAxMDcxMzQ5MzVaMBYxFDAS +BgNVBAMMC2dyZXB0aW1lLWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAwzdEpod7Br06SU41onxvspu1WdYIxx0Zybfv4YeaTbtmIAmSaZON237La1P2 +V72S5lcbH+ImuyJwQkGVy1KZBw4waDbc4pfICX2Sm/UoWCwzegITcBzwYW2Exz4C +skPH09ZU8uHOF4VubJzZwtC3Tx27VUwj+F88/xOD4Ws4btXAPZ+/1Y0CZ8nv5Yjb +t2r+A2B+6YSrifojdKFttTqM8Y8WXRHqhb+YeO9MdxSiqPAWInmwy1sOOXNATVwC +k/BFEfpsjqajCy/NNS9NWUcdvDNAz/zRywJDHzwMk+b5KXzvUkNZuf/ZTXl5jL+d +zzgRmlYKwJylNILH2NsHyERcVwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCi+0Uf +Qd+h2kKo6nm38/RAk6+5sINUzYStoq1C/pNjrYYYz/zVMn4OjBhk5/VtKArSHtEq +YrZL8X6bXqy9e7gNlrwZ4eVxmiCsif5gQt2/jdFrT7hrTRYdax7tEj6yf9XBgjHv +/XZ0TLflbhOhNhy9KA0OyRxmNh9SAcT46psNN+t9S18tLORAHuhE2R95C13P7GHa +HauFFRoG16Wgp1kXXLcrU+mPeJ/+ybWm4OSkyn0ye0wO9XUPfLOLZePTCTeu7xFG +CwXAD1oGR6ZaglZm+guuTR38qG34pPXGcSzLCsBUuTeiMu5amAMOwMIjAbnnH1qe +AtvukomW0uRXHUMw +-----END CERTIFICATE----- diff --git a/src/servers/tests/ssl/root-ca.key b/src/servers/tests/ssl/root-ca.key new file mode 100644 index 000000000000..7458af34b68a --- /dev/null +++ b/src/servers/tests/ssl/root-ca.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDDN0Smh3sGvTpJ +TjWifG+ym7VZ1gjHHRnJt+/hh5pNu2YgCZJpk43bfstrU/ZXvZLmVxsf4ia7InBC +QZXLUpkHDjBoNtzil8gJfZKb9ShYLDN6AhNwHPBhbYTHPgKyQ8fT1lTy4c4XhW5s +nNnC0LdPHbtVTCP4Xzz/E4Phazhu1cA9n7/VjQJnye/liNu3av4DYH7phKuJ+iN0 +oW21OozxjxZdEeqFv5h470x3FKKo8BYiebDLWw45c0BNXAKT8EUR+myOpqMLL801 +L01ZRx28M0DP/NHLAkMfPAyT5vkpfO9SQ1m5/9lNeXmMv53POBGaVgrAnKU0gsfY +2wfIRFxXAgMBAAECggEAU1LSzZXEUEMSjtmAESO19XF6vaaaxopISI5nKEdd+FHF +rGUJhmDByu9a2ivTWO4EtqZ1YG2CBJwVeGJQEqHlyVooFUNdkqYgbtSXcFP67W+o +ZSpfq5nejGdXpkd0lSxTLbstNSJmeims0VU9qWa252EUZbsDG29jNKjawKuoQb3h +J/e2RHoAoYcV1G+C/xcryBsKCUppLf0OwDjvsL3XNJq+EI6hViwho0VOIjggwfRn +4DRPnN+lQA0tVVdhyV4+aUv32nPt9/Ss5WpqFRR0+pL1nnd022MkXZXm5796B/8W +YKIxRvRWw1fSufsjc9Q8Hzx42k+tBh5UwF4+XGUEwQKBgQDz9Jb23zPkRHKm196K +U5MQ6Td77TnL2bAuOsjWl8DuBlPoUMi1sH9e30J4q6RWWdjBR0VJKMaYWtvaDRGE +CjchQ82HDtfD9T0ee3nDBjP7kzKgGpJ/giQ5/Jg/ZUyQOB/YGw5w7cc+j+gJc3iK +/tznXXD85pTqq6vn/wJBzKd2oQKBgQDM2qXfGTareMZGEK6m4SfpvbF4mZQOM/YM +bEP6F/FOlNJLpExWVHkoy0vJ0IZMhyAZr2AmyzFWYZ8L9LW5LyrCkFWASXVPKIUe +tF7xS3JKxml1YXUES6GSofb6BPNLB+KEz0G0SLcHQ5kpSOnSso+kJKMpQ1DBiZYn +qGZ4qeKH9wKBgF47lXDI6P98nRjre6/M9prqqx74lIG0lcRVuqyBs+l9kj3DrrPX ++GtKLB/2lSUx0XNfN1k6IfRJ7HB+6cwqMf9sdGB+EERGX5R9t5vosn2z7zM+8GXG +fH3Vn22lkHyI4WwVj295uaPl7IhyDRcLuYK5amKWIuG+7ElSDKokBm/hAoGBAJXb +JRgtU6bgdPrwXTNK5m3BDMCSaJJzRH0V/ixHs4iuqaAYEpfct7016r05w+TbvInN +l2MJpY/xXe3bF8zeSkOGXmW4Vw6PL8KkZAfUD0nQF3l8z6NSyGGCBjAjyu6KWBSb +oQ8HWoz/0F05L4OoiBeljY4z5jGOOr/MGxoN/N9FAoGAIApQmghUZ9+EtqfQcuCe +KZ8t5ckQYHMvZbgn2sZkZfHtThbkYIRi/E5+2yb9CkY3sBL6OpvYzx3qKLJ7iso/ 
+RSCEvQEj+bdM9QDBCzznC7zdhRGFEYQ3MLjAXSag41HnsxdF5IXKrHlYvb0Rp/lZ +l/TLwp65NEuQ4KMFQOVL5Eo= +-----END PRIVATE KEY----- diff --git a/src/servers/tests/ssl/root-ca.srl b/src/servers/tests/ssl/root-ca.srl new file mode 100644 index 000000000000..9d1029aa35af --- /dev/null +++ b/src/servers/tests/ssl/root-ca.srl @@ -0,0 +1 @@ +EE4175C4833353A6 diff --git a/src/servers/tests/ssl/server-pkcs8.key b/src/servers/tests/ssl/server-pkcs8.key new file mode 100644 index 000000000000..9764e9c64fa6 --- /dev/null +++ b/src/servers/tests/ssl/server-pkcs8.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDNuWUJloRnDJc4 +UvdOqF+YtFdIXn2MPvhzyBdEEqr+vY9nGNnBjBoOhSu6/2yojJpQhFlw8r68p02Q +SKO6ENHrxeZdgs0hQKAFUZBULgF3O3XHWkQ+6P9ajZw8impeYl4cOjFCzYGBXoP1 +r1wnpWRDtvmq8U2lISky9HflcTJz0wqnW/qqzhLLV/i7tik0ShoWeCdr9CpKK95C +vyG1NKsOogM3yc5iIkc/zZX82sZ1o8Fsv4oJIpbrePIowZp93T1Vo5iOSSTEzuBY +DVZhaVkSVLWZtemPOqJs2htKVyIEyTpPsA7ulKhN8mEmbiJyP9Ri+pBkZvjRR3vy +0nH/B5c9AgMBAAECggEBAJ6ysdqfnivQbqcoeVbYVEZ8eAh/u+IAgbDvXeNJc1dn +68PgS7se1Mr2uDFc8Plk3XXXYxfaaoElnpP7NTJH32g+FeN1D8DjFY6EyQ3nH4JX +ABh07ciJ/NJiA3BAZqXAxFCKI44g8hJWUv2n9TMwRxRlhlv0Ia2M0zdXl1YL6Jun +guUi4B9vTSbD1xDxBHRuB8VRNidIocBhT2rLmpiMougRoc2hBoVM7wSNDracpD9a +DL3Rm7Ujv0CwbJbTUPxQuaaRNnkIS6TDqsZvj76n2E35aEOIk+116Fl0nRYZ7LHk +PywnbzLhzVMOQsneiDkEWW7tz735nPNdSIPe96mEeYECgYEA9X9292NW6KR6YpKS ++4IG/TqedKjmwdFPOaEyPj3NoXurVZlM4U+Urgc6M7Lw2m8qM7lcw11Xzhbijz29 +ntjQeMNAOZH0W9/jGjsgkpelO6jF34QRNw2/Cfxl3+nQL14kZjID/7Gw9kyCZa2S +ChJWluudDHZPpS9PybgtvaoRztUCgYEA1oZaldnpDeLN9ftm17UgMUdl8enta6rQ +nN95YEu3gyjtWf2ry3pxTQ44/ZqcrRLRj1y+iu0D5qSqkuz14vntzwGxoVQz3gjq +zdHEkXv9ZpA3M+uMt+dLUbZ9ebpNIGhLf+oxCQxk+v/cEpo2cO5HaZCGiblXQZSr +S7vuov3IaskCgYEAzYKMxn+ka0/1G7tzy5OH4khGCYay1aEwXx/v/WajUwFB5oBU +eXCzGBP4xvqO4WyZuX78hpcHQACsXBjlOapqqg1ZIFhsZNTBOl4w4EaODak1K+1U +s++P8v4VEiKbImv+sIZCDrRjXWui5Rct37yGPAS1DY+lELTQaB8EO3e5PJkCgYBh +uOY+6PsvJigoa5NXo9y8VgfsgWFz8GYDcBF8ekFocBZfLh06HdbLATWY4PuKI85u +fhMWeg2S3WQOdf80nCFmcSEXmqHd/TXo+CuREmhGdl+POTfq9mPrHzRdZS6JGrl5 +1ZbsxkahyDfaCYHPQ9woDHwc9N74st6tKzjz6qOHcQKBgQCP6HeweuBLkCsZfaAv +MUDw1r6MFAosjvAUO+kKtxBSHkxehUwVuhDN/t/nmO2ddetRoQbIDvpTg/3R2obO +vJOiC+FxV8LX+WA8IaTHpv/5Qjl/FDyjGFHWkN+gY+xQ+BTJsR5MPCq9m6ai+ihF +1ynlhxQGWqh6cjuGhdSXYZ6WJg== +-----END PRIVATE KEY----- diff --git a/src/servers/tests/ssl/server-rsa.key b/src/servers/tests/ssl/server-rsa.key new file mode 100644 index 000000000000..5530ed0a11e0 --- /dev/null +++ b/src/servers/tests/ssl/server-rsa.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAzbllCZaEZwyXOFL3TqhfmLRXSF59jD74c8gXRBKq/r2PZxjZ +wYwaDoUruv9sqIyaUIRZcPK+vKdNkEijuhDR68XmXYLNIUCgBVGQVC4Bdzt1x1pE +Puj/Wo2cPIpqXmJeHDoxQs2BgV6D9a9cJ6VkQ7b5qvFNpSEpMvR35XEyc9MKp1v6 +qs4Sy1f4u7YpNEoaFngna/QqSiveQr8htTSrDqIDN8nOYiJHP82V/NrGdaPBbL+K +CSKW63jyKMGafd09VaOYjkkkxM7gWA1WYWlZElS1mbXpjzqibNobSlciBMk6T7AO +7pSoTfJhJm4icj/UYvqQZGb40Ud78tJx/weXPQIDAQABAoIBAQCesrHan54r0G6n +KHlW2FRGfHgIf7viAIGw713jSXNXZ+vD4Eu7HtTK9rgxXPD5ZN1112MX2mqBJZ6T ++zUyR99oPhXjdQ/A4xWOhMkN5x+CVwAYdO3IifzSYgNwQGalwMRQiiOOIPISVlL9 +p/UzMEcUZYZb9CGtjNM3V5dWC+ibp4LlIuAfb00mw9cQ8QR0bgfFUTYnSKHAYU9q +y5qYjKLoEaHNoQaFTO8EjQ62nKQ/Wgy90Zu1I79AsGyW01D8ULmmkTZ5CEukw6rG +b4++p9hN+WhDiJPtdehZdJ0WGeyx5D8sJ28y4c1TDkLJ3og5BFlu7c+9+ZzzXUiD +3vephHmBAoGBAPV/dvdjVuikemKSkvuCBv06nnSo5sHRTzmhMj49zaF7q1WZTOFP +lK4HOjOy8NpvKjO5XMNdV84W4o89vZ7Y0HjDQDmR9Fvf4xo7IJKXpTuoxd+EETcN +vwn8Zd/p0C9eJGYyA/+xsPZMgmWtkgoSVpbrnQx2T6UvT8m4Lb2qEc7VAoGBANaG +WpXZ6Q3izfX7Zte1IDFHZfHp7Wuq0JzfeWBLt4Mo7Vn9q8t6cU0OOP2anK0S0Y9c 
+vortA+akqpLs9eL57c8BsaFUM94I6s3RxJF7/WaQNzPrjLfnS1G2fXm6TSBoS3/q +MQkMZPr/3BKaNnDuR2mQhom5V0GUq0u77qL9yGrJAoGBAM2CjMZ/pGtP9Ru7c8uT +h+JIRgmGstWhMF8f7/1mo1MBQeaAVHlwsxgT+Mb6juFsmbl+/IaXB0AArFwY5Tmq +aqoNWSBYbGTUwTpeMOBGjg2pNSvtVLPvj/L+FRIimyJr/rCGQg60Y11rouUXLd+8 +hjwEtQ2PpRC00GgfBDt3uTyZAoGAYbjmPuj7LyYoKGuTV6PcvFYH7IFhc/BmA3AR +fHpBaHAWXy4dOh3WywE1mOD7iiPObn4TFnoNkt1kDnX/NJwhZnEhF5qh3f016Pgr +kRJoRnZfjzk36vZj6x80XWUuiRq5edWW7MZGocg32gmBz0PcKAx8HPTe+LLerSs4 +8+qjh3ECgYEAj+h3sHrgS5ArGX2gLzFA8Na+jBQKLI7wFDvpCrcQUh5MXoVMFboQ +zf7f55jtnXXrUaEGyA76U4P90dqGzryTogvhcVfC1/lgPCGkx6b/+UI5fxQ8oxhR +1pDfoGPsUPgUybEeTDwqvZumovooRdcp5YcUBlqoenI7hoXUl2GeliY= +-----END RSA PRIVATE KEY----- diff --git a/src/servers/tests/ssl/server.crt b/src/servers/tests/ssl/server.crt index 308430c8bc8e..3b4eca695648 100644 --- a/src/servers/tests/ssl/server.crt +++ b/src/servers/tests/ssl/server.crt @@ -1,77 +1,22 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: - 1e:a1:44:88:27:3d:5c:c8:ff:ef:06:2e:da:21:05:29:30:a5:ce:2c - Signature Algorithm: sha256WithRSAEncryption - Issuer: CN = localhost - Validity - Not Before: Oct 11 07:36:01 2022 GMT - Not After : Oct 8 07:36:01 2032 GMT - Subject: CN = localhost - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:d5:b0:29:38:63:13:5e:1e:1d:ae:1f:47:88:b4: - 44:96:21:d8:d7:03:a3:d8:f9:03:2f:4e:79:66:e6: - db:19:55:1d:85:9b:f1:78:2d:87:f3:72:91:13:dc: - ff:00:cb:ab:fd:a1:c8:3a:56:26:e3:88:1d:ec:98: - 4a:af:eb:f9:60:80:27:e1:06:ba:c0:0d:c3:09:0e: - fe:d8:86:1e:25:b4:04:62:a5:75:46:8e:11:e8:61: - 59:aa:97:17:ea:c7:4c:c6:13:8c:6d:54:2a:b9:78: - 86:54:a9:6f:d6:31:96:c6:41:76:a3:c7:67:40:6f: - f2:1a:4c:0d:77:05:bb:3d:0b:16:f8:c7:de:6c:de: - 7b:2e:b6:29:85:4b:a8:36:d3:f2:84:75:e0:85:17: - ce:22:84:4b:94:02:17:8a:36:2b:13:ee:2f:aa:55: - 6b:ff:8b:df:d3:e0:23:8d:fd:c3:f8:e2:c8:a7:d5: - 76:a6:73:7d:a8:5f:6a:49:02:78:a2:c5:66:14:ee: - 86:50:3b:d1:67:7f:1b:0c:27:0d:84:ec:44:0d:39: - 08:ba:69:65:e0:35:a4:67:aa:19:e7:fe:0e:4b:9f: - 23:1e:4e:38:ed:d7:93:57:6e:94:31:05:d3:ae:f7: - 6c:01:3c:30:69:19:f4:7b:b5:48:95:71:c9:9c:30: - 43:9d - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Key Identifier: - 8E:81:0B:60:B1:F9:7D:D8:64:91:BB:30:86:E5:3D:CD:B7:82:D8:31 - X509v3 Authority Key Identifier: - keyid:8E:81:0B:60:B1:F9:7D:D8:64:91:BB:30:86:E5:3D:CD:B7:82:D8:31 - - X509v3 Basic Constraints: critical - CA:TRUE - Signature Algorithm: sha256WithRSAEncryption - 6c:ae:ee:3e:e3:d4:5d:29:37:62:b0:32:ce:a4:36:c7:25:b4: - 6a:9f:ba:b4:f0:2f:0a:96:2f:dc:6d:df:7d:92:e7:f0:ee:f7: - de:44:9d:52:36:ff:0c:98:ef:8b:7f:27:df:6e:fe:64:11:7c: - 01:5d:7f:c8:73:a3:24:24:ba:81:fd:a8:ae:28:4f:93:bb:92: - ff:86:d6:48:a2:ca:a5:1f:ea:1c:0d:02:22:e8:71:23:27:22: - 4f:0f:37:58:9a:d9:fd:70:c5:4c:93:7d:47:1c:b6:ea:1b:4f: - 4e:7c:eb:9d:9a:d3:28:78:67:27:e9:b1:ea:f6:93:68:76:e5: - 2e:52:c6:29:91:ba:0a:96:2e:14:33:69:35:d7:b5:e0:c0:ef: - 05:77:09:9b:a1:cc:7b:b2:f0:6a:cb:5c:5f:a1:27:69:b0:2c: - 6e:93:eb:37:98:cd:97:8d:9e:78:a8:f5:99:12:66:86:48:cf: - b2:e0:68:6f:77:98:06:13:24:55:d1:c3:80:1d:59:53:1f:44: - 85:bc:5d:29:aa:2a:a1:06:17:6b:e7:2b:11:0b:fd:e3:f8:88: - 89:32:57:a3:70:f7:1b:6c:c1:66:c7:3c:a4:2d:e8:5f:00:1c: - 55:2f:72:ed:d4:3a:3f:d0:95:de:6c:a4:96:6e:b4:63:0e:80: - 08:b2:25:d5 -----BEGIN CERTIFICATE----- -MIIDCTCCAfGgAwIBAgIUHqFEiCc9XMj/7wYu2iEFKTClziwwDQYJKoZIhvcNAQEL -BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIyMTAxMTA3MzYwMVoXDTMyMTAw -ODA3MzYwMVowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF 
-AAOCAQ8AMIIBCgKCAQEA1bApOGMTXh4drh9HiLREliHY1wOj2PkDL055ZubbGVUd -hZvxeC2H83KRE9z/AMur/aHIOlYm44gd7JhKr+v5YIAn4Qa6wA3DCQ7+2IYeJbQE -YqV1Ro4R6GFZqpcX6sdMxhOMbVQquXiGVKlv1jGWxkF2o8dnQG/yGkwNdwW7PQsW -+MfebN57LrYphUuoNtPyhHXghRfOIoRLlAIXijYrE+4vqlVr/4vf0+Ajjf3D+OLI -p9V2pnN9qF9qSQJ4osVmFO6GUDvRZ38bDCcNhOxEDTkIumll4DWkZ6oZ5/4OS58j -Hk447deTV26UMQXTrvdsATwwaRn0e7VIlXHJnDBDnQIDAQABo1MwUTAdBgNVHQ4E -FgQUjoELYLH5fdhkkbswhuU9zbeC2DEwHwYDVR0jBBgwFoAUjoELYLH5fdhkkbsw -huU9zbeC2DEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAbK7u -PuPUXSk3YrAyzqQ2xyW0ap+6tPAvCpYv3G3ffZLn8O733kSdUjb/DJjvi38n327+ -ZBF8AV1/yHOjJCS6gf2orihPk7uS/4bWSKLKpR/qHA0CIuhxIyciTw83WJrZ/XDF -TJN9Rxy26htPTnzrnZrTKHhnJ+mx6vaTaHblLlLGKZG6CpYuFDNpNde14MDvBXcJ -m6HMe7LwastcX6EnabAsbpPrN5jNl42eeKj1mRJmhkjPsuBob3eYBhMkVdHDgB1Z -Ux9EhbxdKaoqoQYXa+crEQv94/iIiTJXo3D3G2zBZsc8pC3oXwAcVS9y7dQ6P9CV -3myklm60Yw6ACLIl1Q== +MIIDnzCCAoegAwIBAgIJAO5BdcSDM1OmMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV +BAMMC2dyZXB0aW1lLWNhMB4XDTIzMDExNjEzNDkzNVoXDTI0MDExNjEzNDkzNVow +ejELMAkGA1UEBhMCQ04xETAPBgNVBAgMCEhhbmd6aG91MREwDwYDVQQHDAhIYW5n +emhvdTERMA8GA1UECgwIR3JlcHRpbWUxGzAZBgNVBAsMEkdyZXB0aW1lIERldmVs +b3BlcjEVMBMGA1UEAwwMZ3JlcHRpbWUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAzbllCZaEZwyXOFL3TqhfmLRXSF59jD74c8gXRBKq/r2PZxjZ +wYwaDoUruv9sqIyaUIRZcPK+vKdNkEijuhDR68XmXYLNIUCgBVGQVC4Bdzt1x1pE +Puj/Wo2cPIpqXmJeHDoxQs2BgV6D9a9cJ6VkQ7b5qvFNpSEpMvR35XEyc9MKp1v6 +qs4Sy1f4u7YpNEoaFngna/QqSiveQr8htTSrDqIDN8nOYiJHP82V/NrGdaPBbL+K +CSKW63jyKMGafd09VaOYjkkkxM7gWA1WYWlZElS1mbXpjzqibNobSlciBMk6T7AO +7pSoTfJhJm4icj/UYvqQZGb40Ud78tJx/weXPQIDAQABo4GLMIGIMDAGA1UdIwQp +MCehGqQYMBYxFDASBgNVBAMMC2dyZXB0aW1lLWNhggkAu/nMXdfcvKowCQYDVR0T +BAIwADALBgNVHQ8EBAMCBPAwPAYDVR0RBDUwM4IOKi5ncmVwdGltZS5jb22CECou +Z3JlcHRpbWUuY2xvdWSCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC +AQEAXiy7KEFEuxsWzEkY59C2TMPjtUL3vrceExyvsguZDZ2DeGSraq5CWH9f6vD8 +fjJhehSYFC7Y0YZlJOo9b0kh7yAvN5T6US0+wzFOr8RMVmCWJhVAiC3weT5YyDMK +V3dfJZtCej/E0Vd5tAR+lArV/FqTsoMR4k9g+8IXwlJVzQ4eX1GAIOEocAHmw/Et +HIQlUAZZTXBWMFDWl9Z+Ro0jPjNS5cvqZxBV27NoIM/3Y5PoqTQ7NSw1CTqLjZoR +J30GrrF3oXtIqgNAPUefCdwa+QJ9Td4n6NvFsNVl6tIodCN10wjqwWpAnadePYmx +tPqVZk/RXHRBC5Z3jsH5jmnLBw== -----END CERTIFICATE----- diff --git a/src/servers/tests/ssl/server.csr b/src/servers/tests/ssl/server.csr new file mode 100644 index 000000000000..115c1abc4582 --- /dev/null +++ b/src/servers/tests/ssl/server.csr @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIDDjCCAfYCAQAwejELMAkGA1UEBhMCQ04xETAPBgNVBAgMCEhhbmd6aG91MREw +DwYDVQQHDAhIYW5nemhvdTERMA8GA1UECgwIR3JlcHRpbWUxGzAZBgNVBAsMEkdy +ZXB0aW1lIERldmVsb3BlcjEVMBMGA1UEAwwMZ3JlcHRpbWUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzbllCZaEZwyXOFL3TqhfmLRXSF59jD74 +c8gXRBKq/r2PZxjZwYwaDoUruv9sqIyaUIRZcPK+vKdNkEijuhDR68XmXYLNIUCg +BVGQVC4Bdzt1x1pEPuj/Wo2cPIpqXmJeHDoxQs2BgV6D9a9cJ6VkQ7b5qvFNpSEp +MvR35XEyc9MKp1v6qs4Sy1f4u7YpNEoaFngna/QqSiveQr8htTSrDqIDN8nOYiJH +P82V/NrGdaPBbL+KCSKW63jyKMGafd09VaOYjkkkxM7gWA1WYWlZElS1mbXpjzqi +bNobSlciBMk6T7AO7pSoTfJhJm4icj/UYvqQZGb40Ud78tJx/weXPQIDAQABoE8w +TQYJKoZIhvcNAQkOMUAwPjA8BgNVHREENTAzgg4qLmdyZXB0aW1lLmNvbYIQKi5n +cmVwdGltZS5jbG91ZIIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB +AQAEL6seksdR8Y2BBuyglesooQmZ7gslbMFz6SAf116c6pg7Jmfm4s+X9bNkIR1F +hJenBoFFVLYTcIOQsmyS8xbEd9Mu39VkCT6vZwE1hUq3SC2z6r5/CflMY12EjWmn +DpNEY7GtyB6jFXmeIMsI+BLt57QuDnA8uP9/dGMO0bb43RVucLwqoaBZPfeO6KYz +kXcQUCzdXzYmRC3FDmfST+LbAC6ZAh7orFQR7RxjgQcVk0cLGqrgNkq/E8BLDumH +c1TeHjMVy2EmM+rMXa7bF12SoZjaBcH/o0O8HjelY1SSqJ4hvzMRH6EiVEdxYU3I +zs5tbOAAnMKrJ6PKkzNDA0vq +-----END CERTIFICATE REQUEST----- diff 
--git a/src/servers/tests/ssl/server.key b/src/servers/tests/ssl/server.key deleted file mode 100644 index 61b3c4eb9084..000000000000 --- a/src/servers/tests/ssl/server.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDVsCk4YxNeHh2u -H0eItESWIdjXA6PY+QMvTnlm5tsZVR2Fm/F4LYfzcpET3P8Ay6v9ocg6VibjiB3s -mEqv6/lggCfhBrrADcMJDv7Yhh4ltARipXVGjhHoYVmqlxfqx0zGE4xtVCq5eIZU -qW/WMZbGQXajx2dAb/IaTA13Bbs9Cxb4x95s3nsutimFS6g20/KEdeCFF84ihEuU -AheKNisT7i+qVWv/i9/T4CON/cP44sin1Xamc32oX2pJAniixWYU7oZQO9FnfxsM -Jw2E7EQNOQi6aWXgNaRnqhnn/g5LnyMeTjjt15NXbpQxBdOu92wBPDBpGfR7tUiV -ccmcMEOdAgMBAAECggEBAMMCIJv0zpf1o+Bja0S2PmFEQj72c3Buzxk85E2kIA7e -PjLQPW0PICJrSzp1U8HGHQ85tSCHvrWmYqin0oD5OHt4eOxC1+qspHB/3tJ6ksiV -n+rmVEAvJuiK7ulfOdRoTQf2jxC23saj1vMsLYOrfY0v8LVGJFQJ1UdqYF9eO6FX -8i6eQekV0n8u+DMUysYXfePDXEwpunKrlZwZtThgBY31gAIOdNo/FOAFe1yBJdPl -rUFZes1IrE0c4CNxodajuRNCjtNWoX8TK1cXQVUpPprdFLBcYG2P9mPZ7SkZWJc7 -rkyPX6Wkb7q3laUCBxuKL1iOJIwaVBYaKfv4HS7VuYECgYEA9H7VB8+whWx2cTFb -9oYbcaU3HtbKRh6KQP8eB4IWeKV/c/ceWVAxtU9Hx2QU1zZ2fLl+KkaOGeECNNqD -BP1O5qk2qmkjJcP4kzh1K+p7zkqAkrhHqB36y/gwptB8v7JbCchQq9cnBeYsXNIa -j13KvteprRSnanKu18d2aC43cNMCgYEA3746ITtqy1g6AQ0Q/MXN/axsXixKfVjf -kgN/lpjy6oeoEIWKqiNrOQpwy4NeBo6ZN+cwjUUr9SY/BKsZqMGErO8Xuu+QtJYD -ioW/My9rTrTElbpsLpSvZDLc9IRepV4k+5PpXTIRBqp7Q3BZnTjbRMc8x/owG23G -eXnfVKlWM88CgYEA5HBQuMCrzK3/qFkW9Kpun+tfKfhD++nzATGcrCU2u7jd8cr1 -1zsfhqkxhrIS6tYfNP/XSsarZLCgcCOuAQ5wFwIJaoVbaqDE80Dv8X1f+eoQYYW+ -peyE9OjLBEGOHUoW13gLL9ORyWg7EOraGBPpKBC2n1nJ5qKKjF/4WPS9pjMCgYEA -3UuUyxGtivn0RN3bk2dBWkmT1YERG/EvD4gORbF5caZDADRU9fqaLoy5C1EfSnT3 -7mbnipKD67CsW72vX04oH7NLUUVpZnOJhRTMC6A3Dl2UolMEdP3yi7QS/nV99ymq -gnnFMrw2QtWTnRweRnbZyKkW4OP/eOGWkMeNsHrcG9kCgYEAz/09cKumk349AIXV -g6Jw64gCTjWh157wnD3ZSPPEcr/09/fZwf1W0gkY/tbCVrVPJHWb3K5t2nRXjLlz -HMnQXmcMxMlY3Ufvm2H3ov1ODPKwpcBWUZqnpFTZX7rC58lO/wvgiKpgtHA3pDdw -oYDaaozVP4EnnByxhmHaM7ce07U= ------END PRIVATE KEY-----
refactor
support TLS private key of RSA format and add the full test certificates generation (#885)
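The refactor above teaches `TlsOption::setup` to accept both PKCS#8 and PKCS#1 (RSA) private keys. Below is a sketch of just that fallback, following the shape of the diff; it assumes the same pre-2.0 `rustls-pemfile` API used there, where `pkcs8_private_keys`/`rsa_private_keys` return a `Vec` of DER-encoded keys.

```rust
use std::fs::File;
use std::io::{BufReader, Error, ErrorKind};

use rustls::PrivateKey;
use rustls_pemfile::{pkcs8_private_keys, rsa_private_keys};

/// Loads a PEM private key, preferring PKCS#8 and falling back to
/// "RSA PRIVATE KEY" (PKCS#1) blocks, mirroring the logic in the diff.
fn load_private_key(key_path: &str) -> Result<PrivateKey, Error> {
    let mut pkcs8 = pkcs8_private_keys(&mut BufReader::new(File::open(key_path)?))
        .map_err(|_| Error::new(ErrorKind::InvalidInput, "invalid key"))?;
    if !pkcs8.is_empty() {
        return Ok(PrivateKey(pkcs8.remove(0)));
    }

    // No PKCS#8 block found; re-read the same file as an RSA (PKCS#1) key.
    let mut rsa = rsa_private_keys(&mut BufReader::new(File::open(key_path)?))
        .map_err(|_| Error::new(ErrorKind::InvalidInput, "invalid key"))?;
    if !rsa.is_empty() {
        return Ok(PrivateKey(rsa.remove(0)));
    }

    Err(Error::new(ErrorKind::InvalidInput, "invalid key"))
}
```

The diff also adds a `gen-certs.sh` script plus `server-rsa.key` and `server-pkcs8.key` fixtures, and parameterizes the MySQL and Postgres TLS tests with `is_pkcs8_priv_key` so both branches of this fallback are exercised.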
49403012b52ebd357fb159d55f02ea05a5024633
2022-11-10 11:51:24
xiaomin tang
docs: Add Apache 2.0 license (#434)
false
diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000000..11d287daa6be --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Greptime Team + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
docs
Add Apache 2.0 license (#434)
a8cbec824c935dcaefeb7f57c4c063ac92233e0c
2024-02-28 11:48:09
Weny Xu
refactor: refactor TableRouteManager (#3392)
false
diff --git a/codecov.yml b/codecov.yml index 506f3445de42..526ce5640717 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,5 +8,6 @@ coverage: ignore: - "**/error*.rs" # ignore all error.rs files - "tests/runner/*.rs" # ignore integration test runner + - "tests-integration/**/*.rs" # ignore integration tests comment: # this is a top-level key layout: "diff" diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs index 0d149c3af7bf..84f5ada3dd8f 100644 --- a/src/common/meta/src/ddl/create_table.rs +++ b/src/common/meta/src/ddl/create_table.rs @@ -206,12 +206,12 @@ impl CreateTableProcedure { .context .table_metadata_manager .table_route_manager() - .get(physical_table_id) + .try_get_physical_table_route(physical_table_id) .await? .context(TableRouteNotFoundSnafu { table_id: physical_table_id, })?; - let region_routes = physical_table_route.region_routes()?; + let region_routes = &physical_table_route.region_routes; let request_builder = self.new_region_request_builder(Some(physical_table_id))?; diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs index c2007377e54e..1d4ee73f9e4c 100644 --- a/src/common/meta/src/key.rs +++ b/src/common/meta/src/key.rs @@ -363,8 +363,10 @@ impl TableMetadataManager { Option<DeserializedValueWithBytes<TableInfoValue>>, Option<DeserializedValueWithBytes<TableRouteValue>>, )> { - let (get_table_route_txn, table_route_decoder) = - self.table_route_manager.build_get_txn(table_id); + let (get_table_route_txn, table_route_decoder) = self + .table_route_manager + .table_route_storage() + .build_get_txn(table_id); let (get_table_info_txn, table_info_decoder) = self.table_info_manager.build_get_txn(table_id); @@ -414,6 +416,7 @@ impl TableMetadataManager { let (create_table_route_txn, on_create_table_route_failure) = self .table_route_manager() + .table_route_storage() .build_create_txn(table_id, &table_route_value)?; let mut txn = Txn::merge_all(vec![ @@ -506,6 +509,7 @@ impl TableMetadataManager { let (create_table_route_txn, on_create_table_route_failure) = self .table_route_manager() + .table_route_storage() .build_create_txn(table_id, &table_route_value)?; txns.push(create_table_route_txn); @@ -579,6 +583,7 @@ impl TableMetadataManager { // Deletes table route. 
let delete_table_route_txn = self .table_route_manager() + .table_route_storage() .build_delete_txn(table_id, table_route_value)?; let txn = Txn::merge_all(vec![ @@ -713,6 +718,7 @@ impl TableMetadataManager { let (update_table_route_txn, on_update_table_route_failure) = self .table_route_manager() + .table_route_storage() .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?; let txn = Txn::merge_all(vec![update_datanode_table_txn, update_table_route_txn]); @@ -765,6 +771,7 @@ impl TableMetadataManager { let (update_table_route_txn, on_update_table_route_failure) = self .table_route_manager() + .table_route_storage() .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?; let r = self.kv_backend.txn(update_table_route_txn).await?; @@ -1096,6 +1103,7 @@ mod tests { assert!(table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() @@ -1120,7 +1128,8 @@ mod tests { let removed_table_route = table_metadata_manager .table_route_manager() - .get_removed(table_id) + .table_route_storage() + .get_raw_removed(table_id) .await .unwrap() .unwrap() @@ -1316,6 +1325,7 @@ mod tests { let updated_route_value = table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs index a932f1474000..4cdf81140d50 100644 --- a/src/common/meta/src/key/table_route.rs +++ b/src/common/meta/src/key/table_route.rs @@ -22,7 +22,7 @@ use table::metadata::TableId; use super::{txn_helper, DeserializedValueWithBytes, TableMetaValue}; use crate::error::{ - MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu, + self, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu, UnexpectedLogicalRouteTableSnafu, }; use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX}; @@ -77,7 +77,7 @@ impl TableRouteValue { err_msg: format!("{self:?} is a non-physical TableRouteValue."), } ); - let version = self.physical_table_route().version; + let version = self.as_physical_table_route_ref().version; Ok(Self::Physical(PhysicalTableRouteValue { region_routes, version: version + 1, @@ -95,7 +95,7 @@ impl TableRouteValue { err_msg: format!("{self:?} is a non-physical TableRouteValue."), } ); - Ok(self.physical_table_route().version) + Ok(self.as_physical_table_route_ref().version) } /// Returns the corresponding [RegionRoute], returns `None` if it's the specific region is not found. @@ -109,7 +109,7 @@ impl TableRouteValue { } ); Ok(self - .physical_table_route() + .as_physical_table_route_ref() .region_routes .iter() .find(|route| route.region.id == region_id) @@ -129,10 +129,25 @@ impl TableRouteValue { err_msg: format!("{self:?} is a non-physical TableRouteValue."), } ); - Ok(&self.physical_table_route().region_routes) + Ok(&self.as_physical_table_route_ref().region_routes) } - fn physical_table_route(&self) -> &PhysicalTableRouteValue { + /// Returns the reference of [`PhysicalTableRouteValue`]. + /// + /// # Panic + /// If it is not the [`PhysicalTableRouteValue`]. + fn as_physical_table_route_ref(&self) -> &PhysicalTableRouteValue { + match self { + TableRouteValue::Physical(x) => x, + _ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"), + } + } + + /// Converts to [`PhysicalTableRouteValue`]. + /// + /// # Panic + /// If it is not the [`PhysicalTableRouteValue`]. 
+ fn into_physical_table_route(self) -> PhysicalTableRouteValue { match self { TableRouteValue::Physical(x) => x, _ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"), @@ -213,111 +228,53 @@ impl Display for TableRouteKey { } pub struct TableRouteManager { - kv_backend: KvBackendRef, + storage: TableRouteStorage, } impl TableRouteManager { pub fn new(kv_backend: KvBackendRef) -> Self { - Self { kv_backend } - } - - pub(crate) fn build_get_txn( - &self, - table_id: TableId, - ) -> ( - Txn, - impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>, - ) { - let key = TableRouteKey::new(table_id); - let raw_key = key.as_raw_key(); - let txn = Txn::new().and_then(vec![TxnOp::Get(raw_key.clone())]); - - (txn, txn_helper::build_txn_response_decoder_fn(raw_key)) - } - - /// Builds a create table route transaction. it expected the `__table_route/{table_id}` wasn't occupied. - pub fn build_create_txn( - &self, - table_id: TableId, - table_route_value: &TableRouteValue, - ) -> Result<( - Txn, - impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>, - )> { - let key = TableRouteKey::new(table_id); - let raw_key = key.as_raw_key(); - - let txn = txn_helper::build_put_if_absent_txn( - raw_key.clone(), - table_route_value.try_as_raw_value()?, - ); - - Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key))) - } - - /// Builds a update table route transaction, it expected the remote value equals the `current_table_route_value`. - /// It retrieves the latest value if the comparing failed. - pub(crate) fn build_update_txn( - &self, - table_id: TableId, - current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>, - new_table_route_value: &TableRouteValue, - ) -> Result<( - Txn, - impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>, - )> { - let key = TableRouteKey::new(table_id); - let raw_key = key.as_raw_key(); - let raw_value = current_table_route_value.get_raw_bytes(); - let new_raw_value: Vec<u8> = new_table_route_value.try_as_raw_value()?; - - let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value); - - Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key))) - } - - /// Builds a delete table route transaction, it expected the remote value equals the `table_route_value`. - pub(crate) fn build_delete_txn( - &self, - table_id: TableId, - table_route_value: &DeserializedValueWithBytes<TableRouteValue>, - ) -> Result<Txn> { - let key = TableRouteKey::new(table_id); - let raw_key = key.as_raw_key(); - let raw_value = table_route_value.get_raw_bytes(); - let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key)); - - let txn = Txn::new().and_then(vec![ - TxnOp::Delete(raw_key), - TxnOp::Put(removed_key.into_bytes(), raw_value), - ]); - - Ok(txn) + Self { + storage: TableRouteStorage::new(kv_backend), + } } - pub async fn get( + /// Returns the [`PhysicalTableRouteValue`] in the first level, + /// It won't follow the [`LogicalTableRouteValue`] to find the next level [`PhysicalTableRouteValue`]. + /// + /// Returns an error if the first level value is not a [`PhysicalTableRouteValue`]. + pub async fn try_get_physical_table_route( &self, table_id: TableId, - ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> { - let key = TableRouteKey::new(table_id); - self.kv_backend - .get(&key.as_raw_key()) - .await? 
- .map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value)) - .transpose() + ) -> Result<Option<PhysicalTableRouteValue>> { + match self.storage.get(table_id).await? { + Some(route) => { + ensure!( + route.is_physical(), + error::UnexpectedLogicalRouteTableSnafu { + err_msg: format!("{route:?} is a non-physical TableRouteValue.") + } + ); + Ok(Some(route.into_physical_table_route())) + } + None => Ok(None), + } } + /// Returns the [TableId] recursively. + /// + /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if: + /// - the table(`logical_or_physical_table_id`) does not exist. pub async fn get_physical_table_id( &self, logical_or_physical_table_id: TableId, ) -> Result<TableId> { let table_route = self + .storage .get(logical_or_physical_table_id) .await? .context(TableRouteNotFoundSnafu { table_id: logical_or_physical_table_id, - })? - .into_inner(); + })?; match table_route { TableRouteValue::Physical(_) => Ok(logical_or_physical_table_id), @@ -325,46 +282,58 @@ impl TableRouteManager { } } - /// Returns the [TableRouteValue::Physical] of table. + /// Returns the [TableRouteValue::Physical] recursively. /// /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if: - /// - the physical table(`logical_or_physical_table_id`) does not exists - /// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exists. + /// - the physical table(`logical_or_physical_table_id`) does not exist + /// - the corresponding physical table of the logical table(`logical_or_physical_table_id`) does not exist. pub async fn get_physical_table_route( &self, logical_or_physical_table_id: TableId, ) -> Result<(TableId, PhysicalTableRouteValue)> { let table_route = self + .storage .get(logical_or_physical_table_id) .await? .context(TableRouteNotFoundSnafu { table_id: logical_or_physical_table_id, - })? - .into_inner(); + })?; match table_route { TableRouteValue::Physical(x) => Ok((logical_or_physical_table_id, x)), TableRouteValue::Logical(x) => { let physical_table_id = x.physical_table_id(); - let physical_table_route = - self.get(physical_table_id) - .await? - .context(TableRouteNotFoundSnafu { - table_id: physical_table_id, - })?; - Ok(( - physical_table_id, - physical_table_route.physical_table_route().clone(), - )) + let physical_table_route = self.storage.get(physical_table_id).await?.context( + TableRouteNotFoundSnafu { + table_id: physical_table_id, + }, + )?; + let physical_table_route = physical_table_route.into_physical_table_route(); + Ok((physical_table_id, physical_table_route)) } } } + /// Returns the [TableRouteValue::Physical] recursively. + /// + /// Returns a [TableRouteNotFound](crate::error::Error::TableRouteNotFound) Error if: + /// - one of the logical tables corresponding to the physical table does not exist. + /// + /// **Notes**: it may return a subset of `logical_or_physical_table_ids`. pub async fn batch_get_physical_table_routes( &self, logical_or_physical_table_ids: &[TableId], ) -> Result<HashMap<TableId, PhysicalTableRouteValue>> { - let table_routes = self.batch_get(logical_or_physical_table_ids).await?; + let table_routes = self + .storage + .batch_get(logical_or_physical_table_ids) + .await?; + // Returns a subset of `logical_or_physical_table_ids`. 
+ let table_routes = table_routes + .into_iter() + .zip(logical_or_physical_table_ids) + .filter_map(|(route, id)| route.map(|route| (*id, route))) + .collect::<HashMap<_, _>>(); let mut physical_table_routes = HashMap::with_capacity(table_routes.len()); let mut logical_table_ids = HashMap::with_capacity(table_routes.len()); @@ -384,13 +353,22 @@ impl TableRouteManager { return Ok(physical_table_routes); } + // Finds the logical tables corresponding to the physical tables. let physical_table_ids = logical_table_ids .values() .cloned() .collect::<HashSet<_>>() .into_iter() .collect::<Vec<_>>(); - let table_routes = self.batch_get(&physical_table_ids).await?; + let table_routes = self + .table_route_storage() + .batch_get(&physical_table_ids) + .await?; + let table_routes = table_routes + .into_iter() + .zip(physical_table_ids) + .filter_map(|(route, id)| route.map(|route| (id, route))) + .collect::<HashMap<_, _>>(); for (logical_table_id, physical_table_id) in logical_table_ids { let table_route = @@ -419,40 +397,114 @@ impl TableRouteManager { Ok(physical_table_routes) } - /// It may return a subset of the `table_ids`. - pub async fn batch_get( + /// Returns [`RegionDistribution`] of the table(`table_id`). + pub async fn get_region_distribution( &self, - table_ids: &[TableId], - ) -> Result<HashMap<TableId, TableRouteValue>> { - let lookup_table = table_ids - .iter() - .map(|id| (TableRouteKey::new(*id).as_raw_key(), id)) - .collect::<HashMap<_, _>>(); + table_id: TableId, + ) -> Result<Option<RegionDistribution>> { + self.storage + .get(table_id) + .await? + .map(|table_route| Ok(region_distribution(table_route.region_routes()?))) + .transpose() + } - let resp = self - .kv_backend - .batch_get(BatchGetRequest { - keys: lookup_table.keys().cloned().collect::<Vec<_>>(), - }) - .await?; + /// Returns low-level APIs. + pub fn table_route_storage(&self) -> &TableRouteStorage { + &self.storage + } +} - let values = resp - .kvs - .iter() - .map(|kv| { - Ok(( - // Safety: must exist. - **lookup_table.get(kv.key()).unwrap(), - TableRouteValue::try_from_raw_value(&kv.value)?, - )) - }) - .collect::<Result<HashMap<_, _>>>()?; +/// Low-level operations of [TableRouteValue]. +pub struct TableRouteStorage { + kv_backend: KvBackendRef, +} + +impl TableRouteStorage { + pub fn new(kv_backend: KvBackendRef) -> Self { + Self { kv_backend } + } + + /// Builds a get table route transaction(readonly). + pub(crate) fn build_get_txn( + &self, + table_id: TableId, + ) -> ( + Txn, + impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>, + ) { + let key = TableRouteKey::new(table_id); + let raw_key = key.as_raw_key(); + let txn = Txn::new().and_then(vec![TxnOp::Get(raw_key.clone())]); + + (txn, txn_helper::build_txn_response_decoder_fn(raw_key)) + } + + /// Builds a create table route transaction, + /// it expected the `__table_route/{table_id}` wasn't occupied. + pub fn build_create_txn( + &self, + table_id: TableId, + table_route_value: &TableRouteValue, + ) -> Result<( + Txn, + impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>, + )> { + let key = TableRouteKey::new(table_id); + let raw_key = key.as_raw_key(); + + let txn = txn_helper::build_put_if_absent_txn( + raw_key.clone(), + table_route_value.try_as_raw_value()?, + ); + + Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key))) + } + + /// Builds a update table route transaction, + /// it expected the remote value equals the `current_table_route_value`. 
+ /// It retrieves the latest value if the comparing failed. + pub(crate) fn build_update_txn( + &self, + table_id: TableId, + current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>, + new_table_route_value: &TableRouteValue, + ) -> Result<( + Txn, + impl FnOnce(&Vec<TxnOpResponse>) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>>, + )> { + let key = TableRouteKey::new(table_id); + let raw_key = key.as_raw_key(); + let raw_value = current_table_route_value.get_raw_bytes(); + let new_raw_value: Vec<u8> = new_table_route_value.try_as_raw_value()?; + + let txn = txn_helper::build_compare_and_put_txn(raw_key.clone(), raw_value, new_raw_value); + + Ok((txn, txn_helper::build_txn_response_decoder_fn(raw_key))) + } + + /// Builds a delete table route transaction, + /// it expected the remote value equals the `table_route_value`. + pub(crate) fn build_delete_txn( + &self, + table_id: TableId, + table_route_value: &DeserializedValueWithBytes<TableRouteValue>, + ) -> Result<Txn> { + let key = TableRouteKey::new(table_id); + let raw_key = key.as_raw_key(); + let raw_value = table_route_value.get_raw_bytes(); + let removed_key = to_removed_key(&String::from_utf8_lossy(&raw_key)); + + let txn = Txn::new().and_then(vec![ + TxnOp::Delete(raw_key), + TxnOp::Put(removed_key.into_bytes(), raw_value), + ]); - Ok(values) + Ok(txn) } #[cfg(test)] - pub async fn get_removed( + pub async fn get_raw_removed( &self, table_id: TableId, ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> { @@ -465,20 +517,64 @@ impl TableRouteManager { .transpose() } - pub async fn get_region_distribution( + /// Returns the [`TableRouteValue`]. + pub async fn get(&self, table_id: TableId) -> Result<Option<TableRouteValue>> { + let key = TableRouteKey::new(table_id); + self.kv_backend + .get(&key.as_raw_key()) + .await? + .map(|kv| TableRouteValue::try_from_raw_value(&kv.value)) + .transpose() + } + + /// Returns the [`TableRouteValue`] wrapped with [`DeserializedValueWithBytes`]. + pub async fn get_raw( &self, table_id: TableId, - ) -> Result<Option<RegionDistribution>> { - self.get(table_id) + ) -> Result<Option<DeserializedValueWithBytes<TableRouteValue>>> { + let key = TableRouteKey::new(table_id); + self.kv_backend + .get(&key.as_raw_key()) .await? - .map(|table_route| Ok(region_distribution(table_route.region_routes()?))) + .map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value)) .transpose() } + + /// Returns batch of [`TableRouteValue`] that respects the order of `table_ids`. 
+ pub async fn batch_get(&self, table_ids: &[TableId]) -> Result<Vec<Option<TableRouteValue>>> { + let keys = table_ids + .iter() + .map(|id| TableRouteKey::new(*id).as_raw_key()) + .collect::<Vec<_>>(); + let resp = self + .kv_backend + .batch_get(BatchGetRequest { keys: keys.clone() }) + .await?; + + let kvs = resp + .kvs + .into_iter() + .map(|kv| (kv.key, kv.value)) + .collect::<HashMap<_, _>>(); + keys.into_iter() + .map(|key| { + if let Some(value) = kvs.get(&key) { + Ok(Some(TableRouteValue::try_from_raw_value(value)?)) + } else { + Ok(None) + } + }) + .collect::<Result<Vec<_>>>() + } } #[cfg(test)] mod tests { + use std::sync::Arc; + use super::*; + use crate::kv_backend::memory::MemoryKvBackend; + use crate::kv_backend::TxnService; #[test] fn test_table_route_compatibility() { @@ -491,4 +587,81 @@ mod tests { r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }], version: 0 })"# ); } + + #[tokio::test] + async fn test_table_route_storage_get_raw_empty() { + let kv = Arc::new(MemoryKvBackend::default()); + let table_route_storage = TableRouteStorage::new(kv); + let table_route = table_route_storage.get_raw(1024).await.unwrap(); + assert!(table_route.is_none()); + } + + #[tokio::test] + async fn test_table_route_storage_get_raw() { + let kv = Arc::new(MemoryKvBackend::default()); + let table_route_storage = TableRouteStorage::new(kv.clone()); + let table_route = table_route_storage.get_raw(1024).await.unwrap(); + assert!(table_route.is_none()); + let table_route_manager = TableRouteManager::new(kv.clone()); + let table_route_value = TableRouteValue::Logical(LogicalTableRouteValue { + physical_table_id: 1023, + region_ids: vec![RegionId::new(1023, 1)], + }); + let (txn, _) = table_route_manager + .table_route_storage() + .build_create_txn(1024, &table_route_value) + .unwrap(); + let r = kv.txn(txn).await.unwrap(); + assert!(r.succeeded); + let table_route = table_route_storage.get_raw(1024).await.unwrap(); + assert!(table_route.is_some()); + let got = table_route.unwrap().inner; + assert_eq!(got, table_route_value); + } + + #[tokio::test] + async fn test_table_route_batch_get() { + let kv = Arc::new(MemoryKvBackend::default()); + let table_route_storage = TableRouteStorage::new(kv.clone()); + let routes = table_route_storage + .batch_get(&[1023, 1024, 1025]) + .await + .unwrap(); + + assert!(routes.iter().all(Option::is_none)); + let table_route_manager = TableRouteManager::new(kv.clone()); + let routes = [ + ( + 1024, + TableRouteValue::Logical(LogicalTableRouteValue { + physical_table_id: 1023, + region_ids: vec![RegionId::new(1023, 1)], + }), + ), + ( + 1025, + TableRouteValue::Logical(LogicalTableRouteValue { + physical_table_id: 1023, + region_ids: vec![RegionId::new(1023, 2)], + }), + ), + ]; + for (table_id, route) in &routes { + let (txn, _) = table_route_manager + .table_route_storage() + .build_create_txn(*table_id, route) + .unwrap(); + let r = kv.txn(txn).await.unwrap(); + assert!(r.succeeded); + } + + let results = table_route_storage + .batch_get(&[9999, 1025, 8888, 1024]) + .await + .unwrap(); + assert!(results[0].is_none()); + assert_eq!(results[1].as_ref().unwrap(), 
&routes[1].1); + assert!(results[2].is_none()); + assert_eq!(results[3].as_ref().unwrap(), &routes[0].1); + } } diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs index 650c794126a6..d6e2c088945c 100644 --- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs +++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs @@ -52,7 +52,8 @@ impl DeactivateRegion { let table_route_value = ctx .table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .context(error::TableMetadataManagerSnafu)? .context(error::TableRouteNotFoundSnafu { table_id })?; @@ -201,6 +202,7 @@ mod tests { .context .table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs index 542b02ca08e7..6302d20eee73 100644 --- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs +++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs @@ -82,7 +82,8 @@ impl UpdateRegionMetadata { let table_route_value = ctx .table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .context(error::TableMetadataManagerSnafu)? .context(TableRouteNotFoundSnafu { table_id })?; @@ -233,7 +234,8 @@ mod tests { env.context .table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .unwrap() .unwrap() @@ -396,11 +398,11 @@ mod tests { .context .table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); let peers = &extract_all_peers(table_route_value.region_routes().unwrap()); let actual = table_route_value.region_routes().unwrap(); @@ -416,11 +418,11 @@ mod tests { let manager = &env.context.table_metadata_manager; let table_route_value = manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); let map = region_distribution(table_route_value.region_routes().unwrap()); assert_eq!(map.len(), 2); diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs index 77e1493cfc7e..011be7c88c35 100644 --- a/src/meta-srv/src/procedure/region_migration.rs +++ b/src/meta-srv/src/procedure/region_migration.rs @@ -218,7 +218,8 @@ impl Context { let table_route = self .table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .context(error::TableMetadataManagerSnafu) .map_err(BoxedError::new) @@ -803,6 +804,7 @@ mod tests { .env() .table_metadata_manager() .table_route_manager() + .table_route_storage() .get(region_id.table_id()) .await .unwrap() diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs index ad1b88efe0f3..000520ee576d 100644 --- a/src/meta-srv/src/procedure/region_migration/manager.rs +++ b/src/meta-srv/src/procedure/region_migration/manager.rs @@ -177,6 +177,7 @@ impl RegionMigrationManager { .context_factory .table_metadata_manager .table_route_manager() + .table_route_storage() .get(region_id.table_id()) .await .context(error::TableMetadataManagerSnafu)? 
@@ -184,7 +185,7 @@ impl RegionMigrationManager { table_id: region_id.table_id(), })?; - Ok(table_route.into_inner()) + Ok(table_route) } /// Verifies the type of region migration table route. diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs index c09d18c8965f..8d22fd8104e3 100644 --- a/src/meta-srv/src/procedure/region_migration/test_util.rs +++ b/src/meta-srv/src/procedure/region_migration/test_util.rs @@ -416,11 +416,11 @@ impl ProcedureMigrationTestSuite { .env .table_metadata_manager .table_route_manager() + .table_route_storage() .get(region_id.table_id()) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); let region_routes = table_route.region_routes().unwrap(); let expected_leader_id = self.context.persistent_ctx.to_peer.id; diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs index e018053407a6..1404b8aca2b7 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs @@ -145,7 +145,8 @@ mod tests { let table_metadata_manager = env.table_metadata_manager(); let original_table_route = table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .unwrap() .unwrap(); @@ -201,6 +202,7 @@ mod tests { let latest_table_route = table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() @@ -243,6 +245,7 @@ mod tests { let latest_table_route = table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs index 81e73e270af7..c9253be2d597 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs @@ -134,7 +134,8 @@ mod tests { let table_metadata_manager = env.table_metadata_manager(); let old_table_route = table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .unwrap() .unwrap(); @@ -165,11 +166,11 @@ mod tests { let table_route = table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); assert_eq!( &expected_region_routes, table_route.region_routes().unwrap() @@ -229,11 +230,11 @@ mod tests { let table_route = table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); assert_eq!( &expected_region_routes, table_route.region_routes().unwrap() diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs index 2c5a5f61d0d6..9272491e1967 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs @@ -335,7 +335,8 @@ mod tests { let table_metadata_manager = env.table_metadata_manager(); let 
original_table_route = table_metadata_manager .table_route_manager() - .get(table_id) + .table_route_storage() + .get_raw(table_id) .await .unwrap() .unwrap(); @@ -473,11 +474,11 @@ mod tests { let table_route = table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); let region_routes = table_route.region_routes().unwrap(); assert!(ctx.volatile_ctx.table_route.is_none()); diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs index 39b984476a43..669c9e56c03a 100644 --- a/src/meta-srv/src/procedure/tests.rs +++ b/src/meta-srv/src/procedure/tests.rs @@ -261,6 +261,7 @@ async fn test_on_datanode_create_logical_regions() { let physical_route_txn = ctx .table_metadata_manager .table_route_manager() + .table_route_storage() .build_create_txn(physical_table_id, &physical_table_route) .unwrap() .0; diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs index 9aea23232b5f..968deb7e12df 100644 --- a/src/meta-srv/src/region/lease_keeper.rs +++ b/src/meta-srv/src/region/lease_keeper.rs @@ -105,11 +105,18 @@ impl RegionLeaseKeeper { // The subset of all table metadata. // TODO: considers storing all active regions in meta's memory. - let metadata_subset = table_route_manager + let table_routes = table_route_manager + .table_route_storage() .batch_get(table_ids) .await .context(error::TableMetadataManagerSnafu)?; + let metadata_subset = table_routes + .into_iter() + .zip(table_ids) + .filter_map(|(route, id)| route.map(|route| (*id, route))) + .collect::<HashMap<_, _>>(); + Ok(metadata_subset) } diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs index 9573757a3ffc..5f89a5cfeb2f 100644 --- a/src/meta-srv/src/selector/load_based.rs +++ b/src/meta-srv/src/selector/load_based.rs @@ -138,6 +138,7 @@ async fn get_leader_peer_ids( ) -> Result<Vec<u64>> { table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .context(error::TableMetadataManagerSnafu) diff --git a/src/meta-srv/src/service/admin/route.rs b/src/meta-srv/src/service/admin/route.rs index 217133ad3d88..4339e1a56417 100644 --- a/src/meta-srv/src/service/admin/route.rs +++ b/src/meta-srv/src/service/admin/route.rs @@ -54,6 +54,7 @@ impl HttpHandler for RouteHandler { let table_route_value = self .table_metadata_manager .table_route_manager() + .table_route_storage() .get(table_id) .await .context(TableMetadataManagerSnafu)? diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs index 9076d487655a..a7572066f703 100644 --- a/tests-integration/src/grpc.rs +++ b/tests-integration/src/grpc.rs @@ -516,11 +516,11 @@ CREATE TABLE {table_name} ( let table_route_value = instance .table_metadata_manager() .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); let region_to_dn_map = region_distribution( table_route_value diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs index 556c4a2ebcbe..d1f4c8929d0e 100644 --- a/tests-integration/src/instance.rs +++ b/tests-integration/src/instance.rs @@ -211,11 +211,11 @@ mod tests { let table_route_value = manager .table_route_manager() + .table_route_storage() .get(table_id) .await .unwrap() - .unwrap() - .into_inner(); + .unwrap(); let region_to_dn_map = region_distribution( table_route_value
refactor
refactor TableRouteManager (#3392)
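The refactor recorded above repeatedly swaps map-style lookups for `TableRouteStorage::batch_get`, which returns one `Option` per requested table id in input order and lets callers zip the results back with the ids to build a subset map (as the lease keeper and `batch_get_physical_table_routes` do in the diff). Below is a minimal, std-only Rust sketch of that order-preserving pattern; `RouteStorage` and the plain `u32`/`String` types are simplified stand-ins for illustration, not the actual GreptimeDB `TableRouteStorage`/`TableRouteValue` API, which is async and fallible.

```rust
use std::collections::HashMap;

/// Simplified stand-in for the low-level storage introduced in the refactor above.
struct RouteStorage {
    kv: HashMap<u32, String>,
}

impl RouteStorage {
    /// Returns one `Option` per requested id, preserving the input order,
    /// like `TableRouteStorage::batch_get` in the diff (sync and infallible here).
    fn batch_get(&self, ids: &[u32]) -> Vec<Option<String>> {
        ids.iter().map(|id| self.kv.get(id).cloned()).collect()
    }
}

fn main() {
    let storage = RouteStorage {
        kv: HashMap::from([
            (1024, "route-a".to_string()),
            (1025, "route-b".to_string()),
        ]),
    };

    // Callers zip the results back with the ids and drop the misses,
    // producing a subset map keyed by table id.
    let ids = [9999, 1025, 8888, 1024];
    let subset: HashMap<u32, String> = storage
        .batch_get(&ids)
        .into_iter()
        .zip(ids)
        .filter_map(|(route, id)| route.map(|route| (id, route)))
        .collect();

    assert_eq!(subset.len(), 2);
    assert_eq!(subset[&1024u32], "route-a");
    assert!(!subset.contains_key(&9999u32));
}
```

Keeping the raw result aligned with the requested ids makes the "may return a subset" behaviour explicit at the call site instead of hiding it inside the storage layer, which is the shape the call sites in the diff rely on.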
96b6235f259c638f27e97a36d009666d5f8e3f5c
2024-01-04 16:23:43
Yingwen
feat(mito): Add WriteCache struct and write SSTs to write cache (#2999)
false
diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs index 1f4fd731efa7..2e22da087c6b 100644 --- a/src/mito2/src/access_layer.rs +++ b/src/mito2/src/access_layer.rs @@ -18,18 +18,22 @@ use object_store::ObjectStore; use snafu::ResultExt; use store_api::metadata::RegionMetadataRef; +use crate::cache::write_cache::SstUploadRequest; +use crate::cache::CacheManagerRef; use crate::error::{DeleteSstSnafu, Result}; use crate::read::Source; use crate::sst::file::{FileHandle, FileId}; use crate::sst::location; use crate::sst::parquet::reader::ParquetReaderBuilder; use crate::sst::parquet::writer::ParquetWriter; +use crate::sst::parquet::{SstInfo, WriteOptions}; pub type AccessLayerRef = Arc<AccessLayer>; /// A layer to access SST files under the same directory. pub struct AccessLayer { region_dir: String, + /// Target object store. object_store: ObjectStore, } @@ -74,15 +78,44 @@ impl AccessLayer { ParquetReaderBuilder::new(self.region_dir.clone(), file, self.object_store.clone()) } - /// Returns a new parquet writer to write the SST for specific `file_id`. - // TODO(hl): maybe rename to [sst_writer]. - pub(crate) fn write_sst( + /// Writes a SST with specific `file_id` and `metadata` to the layer. + /// + /// Returns the info of the SST. If no data written, returns None. + pub(crate) async fn write_sst( &self, - file_id: FileId, - metadata: RegionMetadataRef, - source: Source, - ) -> ParquetWriter { - let path = location::sst_file_path(&self.region_dir, file_id); - ParquetWriter::new(path, metadata, source, self.object_store.clone()) + request: SstWriteRequest, + write_opts: &WriteOptions, + ) -> Result<Option<SstInfo>> { + let path = location::sst_file_path(&self.region_dir, request.file_id); + + if let Some(write_cache) = request.cache_manager.write_cache() { + // Write to the write cache. + return write_cache + .write_and_upload_sst( + SstUploadRequest { + file_id: request.file_id, + metadata: request.metadata, + source: request.source, + storage: request.storage, + upload_path: path, + remote_store: self.object_store.clone(), + }, + write_opts, + ) + .await; + } + + // Write cache is disabled. + let mut writer = ParquetWriter::new(path, request.metadata, self.object_store.clone()); + writer.write_all(request.source, write_opts).await } } + +/// Contents to build a SST. +pub(crate) struct SstWriteRequest { + pub(crate) file_id: FileId, + pub(crate) metadata: RegionMetadataRef, + pub(crate) source: Source, + pub(crate) cache_manager: CacheManagerRef, + pub(crate) storage: Option<String>, +} diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs index cc02a2d037ce..62c91b2b4155 100644 --- a/src/mito2/src/cache.rs +++ b/src/mito2/src/cache.rs @@ -20,6 +20,8 @@ mod cache_size; pub(crate) mod file_cache; #[cfg(test)] pub(crate) mod test_util; +#[allow(unused)] +pub(crate) mod write_cache; use std::mem; use std::sync::Arc; @@ -32,6 +34,7 @@ use parquet::file::metadata::ParquetMetaData; use store_api::storage::RegionId; use crate::cache::cache_size::parquet_meta_size; +use crate::cache::write_cache::WriteCacheRef; use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS}; use crate::sst::file::FileId; @@ -44,6 +47,8 @@ const PAGE_TYPE: &str = "page"; // Metrics type key for files on the local store. const FILE_TYPE: &str = "file"; +// TODO(yingwen): Builder for cache manager. + /// Manages cached data for the engine. pub struct CacheManager { /// Cache for SST metadata. @@ -52,6 +57,10 @@ pub struct CacheManager { vector_cache: Option<VectorCache>, /// Cache for SST pages. 
page_cache: Option<PageCache>, + /// A Cache for writing files to object stores. + // TODO(yingwen): Remove this once the cache is ready. + #[allow(unused)] + write_cache: Option<WriteCacheRef>, } pub type CacheManagerRef = Arc<CacheManager>; @@ -111,6 +120,7 @@ impl CacheManager { sst_meta_cache, vector_cache, page_cache, + write_cache: None, } } @@ -184,6 +194,11 @@ impl CacheManager { cache.insert(page_key, pages); } } + + /// Gets the the write cache. + pub(crate) fn write_cache(&self) -> Option<&WriteCacheRef> { + self.write_cache.as_ref() + } } fn meta_cache_weight(k: &SstMetaKey, v: &Arc<ParquetMetaData>) -> u32 { diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs index 25fd5d6d62bf..3fd3408edd89 100644 --- a/src/mito2/src/cache/file_cache.rs +++ b/src/mito2/src/cache/file_cache.rs @@ -14,6 +14,7 @@ //! A cache for files. +use std::sync::Arc; use std::time::Instant; use common_base::readable_size::ReadableSize; @@ -21,7 +22,7 @@ use common_telemetry::{info, warn}; use futures::{FutureExt, TryStreamExt}; use moka::future::Cache; use moka::notification::RemovalCause; -use object_store::util::{join_dir, join_path}; +use object_store::util::join_path; use object_store::{ErrorKind, Metakey, ObjectStore, Reader}; use snafu::ResultExt; use store_api::storage::RegionId; @@ -32,7 +33,7 @@ use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS}; use crate::sst::file::FileId; /// Subdirectory of cached files. -const FILE_DIR: &str = "files"; +const FILE_DIR: &str = "files/"; /// A file cache manages files on local store and evict files based /// on size. @@ -40,25 +41,18 @@ const FILE_DIR: &str = "files"; pub(crate) struct FileCache { /// Local store to cache files. local_store: ObjectStore, - /// Cached file directory under cache home. - file_dir: String, /// Index to track cached files. /// /// File id is enough to identity a file uniquely. memory_index: Cache<IndexKey, IndexValue>, } +pub(crate) type FileCacheRef = Arc<FileCache>; + impl FileCache { /// Creates a new file cache. - pub(crate) fn new( - local_store: ObjectStore, - cache_home: String, - capacity: ReadableSize, - ) -> FileCache { - // Stores files under `cache_home/{FILE_DIR}`. - let file_dir = cache_file_dir(&cache_home); + pub(crate) fn new(local_store: ObjectStore, capacity: ReadableSize) -> FileCache { let cache_store = local_store.clone(); - let cache_file_dir = file_dir.clone(); let memory_index = Cache::builder() .weigher(|_key, value: &IndexValue| -> u32 { // We only measure space on local store. @@ -67,7 +61,8 @@ impl FileCache { .max_capacity(capacity.as_bytes()) .async_eviction_listener(move |key, value, cause| { let store = cache_store.clone(); - let file_path = cache_file_path(&cache_file_dir, *key); + // Stores files under FILE_DIR. + let file_path = cache_file_path(FILE_DIR, *key); async move { if let RemovalCause::Replaced = cause { // The cache is replaced by another file. This is unexpected, we don't remove the same @@ -91,7 +86,6 @@ impl FileCache { .build(); FileCache { local_store, - file_dir, memory_index, } } @@ -145,7 +139,7 @@ impl FileCache { let mut lister = self .local_store - .lister_with(&self.file_dir) + .lister_with(FILE_DIR) .metakey(Metakey::ContentLength) .await .context(OpenDalSnafu)?; @@ -182,7 +176,7 @@ impl FileCache { /// Returns the cache file path for the key. pub(crate) fn cache_file_path(&self, key: IndexKey) -> String { - cache_file_path(&self.file_dir, key) + cache_file_path(FILE_DIR, key) } /// Returns the local store of the file cache. 
@@ -203,11 +197,6 @@ pub(crate) struct IndexValue { file_size: u32, } -/// Returns the directory to store files. -fn cache_file_dir(cache_home: &str) -> String { - join_dir(cache_home, FILE_DIR) -} - /// Generates the path to the cached file. /// /// The file name format is `{region_id}.{file_id}` @@ -245,13 +234,8 @@ mod tests { async fn test_file_cache_basic() { let dir = create_temp_dir(""); let local_store = new_fs_store(dir.path().to_str().unwrap()); - let cache_home = "cache".to_string(); - let cache = FileCache::new( - local_store.clone(), - cache_home.clone(), - ReadableSize::mb(10), - ); + let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10)); let region_id = RegionId::new(2000, 0); let file_id = FileId::random(); let key = (region_id, file_id); @@ -291,13 +275,8 @@ mod tests { async fn test_file_cache_file_removed() { let dir = create_temp_dir(""); let local_store = new_fs_store(dir.path().to_str().unwrap()); - let cache_home = "cache".to_string(); - let cache = FileCache::new( - local_store.clone(), - cache_home.clone(), - ReadableSize::mb(10), - ); + let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10)); let region_id = RegionId::new(2000, 0); let file_id = FileId::random(); let key = (region_id, file_id); @@ -326,12 +305,7 @@ mod tests { async fn test_file_cache_recover() { let dir = create_temp_dir(""); let local_store = new_fs_store(dir.path().to_str().unwrap()); - let cache_home = "cache".to_string(); - let cache = FileCache::new( - local_store.clone(), - cache_home.clone(), - ReadableSize::mb(10), - ); + let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10)); let region_id = RegionId::new(2000, 0); // Write N files. @@ -354,11 +328,7 @@ mod tests { } // Recover the cache. - let cache = FileCache::new( - local_store.clone(), - cache_home.clone(), - ReadableSize::mb(10), - ); + let cache = FileCache::new(local_store.clone(), ReadableSize::mb(10)); // No entry before recovery. assert!(cache.reader((region_id, file_ids[0])).await.is_none()); cache.recover().await.unwrap(); diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs new file mode 100644 index 000000000000..b640ba896666 --- /dev/null +++ b/src/mito2/src/cache/write_cache.rs @@ -0,0 +1,86 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A write-through cache for remote object stores. + +use std::sync::Arc; + +use common_base::readable_size::ReadableSize; +use object_store::manager::ObjectStoreManagerRef; +use object_store::ObjectStore; +use store_api::metadata::RegionMetadataRef; + +use crate::cache::file_cache::{FileCache, FileCacheRef}; +use crate::error::Result; +use crate::read::Source; +use crate::sst::file::FileId; +use crate::sst::parquet::writer::ParquetWriter; +use crate::sst::parquet::{SstInfo, WriteOptions}; + +/// A cache for uploading files to remote object stores. +/// +/// It keeps files in local disk and then sends files to object stores. 
+pub struct WriteCache { + /// Local file cache. + file_cache: FileCacheRef, + /// Object store manager. + object_store_manager: ObjectStoreManagerRef, +} + +pub type WriteCacheRef = Arc<WriteCache>; + +impl WriteCache { + /// Create the cache with a `local_store` to cache files and a + /// `object_store_manager` for all object stores. + pub fn new( + local_store: ObjectStore, + object_store_manager: ObjectStoreManagerRef, + cache_capacity: ReadableSize, + ) -> Self { + Self { + file_cache: Arc::new(FileCache::new(local_store, cache_capacity)), + object_store_manager, + } + } + + /// Recovers the write cache from local store. + pub async fn recover(&self) -> Result<()> { + self.file_cache.recover().await + } + + /// Writes SST to the cache and then uploads it to the remote object store. + pub async fn write_and_upload_sst( + &self, + request: SstUploadRequest, + write_opts: &WriteOptions, + ) -> Result<Option<SstInfo>> { + // TODO(yingwen): Write to the local store and then upload. + // Now we write to the remote and ignore local cache. + let mut writer = + ParquetWriter::new(request.upload_path, request.metadata, request.remote_store); + writer.write_all(request.source, write_opts).await + } +} + +/// Request to write and upload a SST. +pub struct SstUploadRequest { + pub file_id: FileId, + pub metadata: RegionMetadataRef, + pub source: Source, + pub storage: Option<String>, + /// Path to upload the file. + pub upload_path: String, + /// Remote object store to upload. + pub remote_store: ObjectStore, +} diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs index 94f04b17aa66..0ddcec61d0f2 100644 --- a/src/mito2/src/compaction.rs +++ b/src/mito2/src/compaction.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod output; mod picker; #[cfg(test)] mod test_util; @@ -30,6 +29,7 @@ use store_api::storage::RegionId; use tokio::sync::mpsc::{self, Sender}; use crate::access_layer::AccessLayerRef; +use crate::cache::CacheManagerRef; use crate::compaction::twcs::TwcsPicker; use crate::config::MitoConfig; use crate::error::{ @@ -55,6 +55,7 @@ pub struct CompactionRequest { pub(crate) start_time: Instant, /// Buffering threshold while writing SST files. pub(crate) sst_write_buffer_size: ReadableSize, + pub(crate) cache_manager: CacheManagerRef, } impl CompactionRequest { @@ -88,14 +89,20 @@ pub(crate) struct CompactionScheduler { region_status: HashMap<RegionId, CompactionStatus>, /// Request sender of the worker that this scheduler belongs to. 
request_sender: Sender<WorkerRequest>, + cache_manager: CacheManagerRef, } impl CompactionScheduler { - pub(crate) fn new(scheduler: SchedulerRef, request_sender: Sender<WorkerRequest>) -> Self { + pub(crate) fn new( + scheduler: SchedulerRef, + request_sender: Sender<WorkerRequest>, + cache_manager: CacheManagerRef, + ) -> Self { Self { scheduler, region_status: HashMap::new(), request_sender, + cache_manager, } } @@ -122,8 +129,12 @@ impl CompactionScheduler { access_layer.clone(), file_purger.clone(), ); - let request = - status.new_compaction_request(self.request_sender.clone(), waiter, engine_config); + let request = status.new_compaction_request( + self.request_sender.clone(), + waiter, + engine_config, + self.cache_manager.clone(), + ); self.region_status.insert(region_id, status); self.schedule_compaction_request(request) } @@ -142,6 +153,7 @@ impl CompactionScheduler { self.request_sender.clone(), OptionOutputTx::none(), engine_config, + self.cache_manager.clone(), ); // Try to schedule next compaction task for this region. if let Err(e) = self.schedule_compaction_request(request) { @@ -314,6 +326,7 @@ impl CompactionStatus { request_sender: Sender<WorkerRequest>, waiter: OptionOutputTx, engine_config: Arc<MitoConfig>, + cache_manager: CacheManagerRef, ) -> CompactionRequest { let current_version = self.version_control.current().version; let start_time = Instant::now(); @@ -325,6 +338,7 @@ impl CompactionStatus { file_purger: self.file_purger.clone(), start_time, sst_write_buffer_size: engine_config.sst_write_buffer_size, + cache_manager, }; if let Some(pending) = self.pending_compaction.take() { diff --git a/src/mito2/src/compaction/output.rs b/src/mito2/src/compaction/output.rs deleted file mode 100644 index 6111e95c40e7..000000000000 --- a/src/mito2/src/compaction/output.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use common_base::readable_size::ReadableSize; -use store_api::metadata::RegionMetadataRef; -use store_api::storage::RegionId; - -use crate::access_layer::AccessLayerRef; -use crate::error; -use crate::read::projection::ProjectionMapper; -use crate::read::seq_scan::SeqScan; -use crate::read::{BoxedBatchReader, Source}; -use crate::sst::file::{FileHandle, FileId, FileMeta, Level}; -use crate::sst::parquet::{SstInfo, WriteOptions}; - -#[derive(Debug)] -pub(crate) struct CompactionOutput { - pub output_file_id: FileId, - /// Compaction output file level. - pub output_level: Level, - /// Compaction input files. 
- pub inputs: Vec<FileHandle>, -} - -impl CompactionOutput { - pub(crate) async fn build( - &self, - region_id: RegionId, - schema: RegionMetadataRef, - sst_layer: AccessLayerRef, - sst_write_buffer_size: ReadableSize, - ) -> error::Result<Option<FileMeta>> { - let reader = build_sst_reader(schema.clone(), sst_layer.clone(), &self.inputs).await?; - - let opts = WriteOptions { - write_buffer_size: sst_write_buffer_size, - ..Default::default() - }; - - // TODO(hl): measure merge elapsed time. - - let mut writer = sst_layer.write_sst(self.output_file_id, schema, Source::Reader(reader)); - let meta = writer.write_all(&opts).await?.map( - |SstInfo { - time_range, - file_size, - .. - }| { - FileMeta { - region_id, - file_id: self.output_file_id, - time_range, - level: self.output_level, - file_size, - } - }, - ); - - Ok(meta) - } -} - -/// Builds [BoxedBatchReader] that reads all SST files and yields batches in primary key order. -async fn build_sst_reader( - schema: RegionMetadataRef, - sst_layer: AccessLayerRef, - inputs: &[FileHandle], -) -> error::Result<BoxedBatchReader> { - SeqScan::new(sst_layer, ProjectionMapper::all(&schema)?) - .with_files(inputs.to_vec()) - // We ignore file not found error during compaction. - .with_ignore_file_not_found(true) - .build_reader() - .await -} diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs index 6b853cc98313..9cf45cdf9089 100644 --- a/src/mito2/src/compaction/twcs.rs +++ b/src/mito2/src/compaction/twcs.rs @@ -27,18 +27,21 @@ use store_api::metadata::RegionMetadataRef; use store_api::storage::RegionId; use tokio::sync::mpsc; -use crate::access_layer::AccessLayerRef; -use crate::compaction::output::CompactionOutput; +use crate::access_layer::{AccessLayerRef, SstWriteRequest}; +use crate::cache::CacheManagerRef; use crate::compaction::picker::{CompactionTask, Picker}; use crate::compaction::CompactionRequest; -use crate::error; -use crate::error::CompactRegionSnafu; +use crate::error::{self, CompactRegionSnafu}; use crate::metrics::{COMPACTION_FAILURE_COUNT, COMPACTION_STAGE_ELAPSED}; +use crate::read::projection::ProjectionMapper; +use crate::read::seq_scan::SeqScan; +use crate::read::{BoxedBatchReader, Source}; use crate::request::{ BackgroundNotify, CompactionFailed, CompactionFinished, OutputTx, WorkerRequest, }; -use crate::sst::file::{FileHandle, FileId, FileMeta}; +use crate::sst::file::{FileHandle, FileId, FileMeta, Level}; use crate::sst::file_purger::FilePurgerRef; +use crate::sst::parquet::WriteOptions; use crate::sst::version::LevelMeta; const MAX_PARALLEL_COMPACTION: usize = 8; @@ -126,6 +129,7 @@ impl Picker for TwcsPicker { file_purger, start_time, sst_write_buffer_size, + cache_manager, } = req; let region_metadata = current_version.metadata.clone(); @@ -169,7 +173,7 @@ impl Picker for TwcsPicker { } let task = TwcsCompactionTask { region_id, - schema: region_metadata, + metadata: region_metadata, sst_layer: access_layer, outputs, expired_ssts, @@ -179,6 +183,8 @@ impl Picker for TwcsPicker { waiters, file_purger, start_time, + cache_manager, + storage: current_version.options.storage.clone(), }; Some(Box::new(task)) } @@ -228,7 +234,7 @@ fn find_latest_window_in_seconds<'a>( pub(crate) struct TwcsCompactionTask { pub region_id: RegionId, - pub schema: RegionMetadataRef, + pub metadata: RegionMetadataRef, pub sst_layer: AccessLayerRef, pub outputs: Vec<CompactionOutput>, pub expired_ssts: Vec<FileHandle>, @@ -241,6 +247,9 @@ pub(crate) struct TwcsCompactionTask { pub waiters: Vec<OutputTx>, /// Start time of 
compaction task pub start_time: Instant, + pub(crate) cache_manager: CacheManagerRef, + /// Target storage of the region. + pub(crate) storage: Option<String>, } impl Debug for TwcsCompactionTask { @@ -274,11 +283,8 @@ impl TwcsCompactionTask { let mut futs = Vec::with_capacity(self.outputs.len()); let mut compacted_inputs = Vec::with_capacity(self.outputs.iter().map(|o| o.inputs.len()).sum()); - let region_id = self.region_id; + for output in self.outputs.drain(..) { - let schema = self.schema.clone(); - let sst_layer = self.sst_layer.clone(); - let sst_write_buffer_size = self.sst_write_buffer_size; compacted_inputs.extend(output.inputs.iter().map(FileHandle::meta)); info!( @@ -293,15 +299,42 @@ impl TwcsCompactionTask { output.output_file_id ); - // TODO(hl): Maybe spawn to runtime to exploit in-job parallelism. + let write_opts = WriteOptions { + write_buffer_size: self.sst_write_buffer_size, + ..Default::default() + }; + let metadata = self.metadata.clone(); + let sst_layer = self.sst_layer.clone(); + let region_id = self.region_id; + let cache_manager = self.cache_manager.clone(); + let storage = self.storage.clone(); futs.push(async move { - output - .build(region_id, schema, sst_layer, sst_write_buffer_size) - .await + let reader = + build_sst_reader(metadata.clone(), sst_layer.clone(), &output.inputs).await?; + let file_meta_opt = sst_layer + .write_sst( + SstWriteRequest { + file_id: output.output_file_id, + metadata, + source: Source::Reader(reader), + cache_manager, + storage, + }, + &write_opts, + ) + .await? + .map(|sst_info| FileMeta { + region_id, + file_id: output.output_file_id, + time_range: sst_info.time_range, + level: output.output_level, + file_size: sst_info.file_size, + }); + Ok(file_meta_opt) }); } - let mut outputs = Vec::with_capacity(futs.len()); + let mut output_files = Vec::with_capacity(futs.len()); while !futs.is_empty() { let mut task_chunk = Vec::with_capacity(MAX_PARALLEL_COMPACTION); for _ in 0..MAX_PARALLEL_COMPACTION { @@ -314,11 +347,11 @@ impl TwcsCompactionTask { .context(error::JoinSnafu)? .into_iter() .collect::<error::Result<Vec<_>>>()?; - outputs.extend(metas.into_iter().flatten()); + output_files.extend(metas.into_iter().flatten()); } let inputs = compacted_inputs.into_iter().collect(); - Ok((outputs, inputs)) + Ok((output_files, inputs)) } async fn handle_compaction(&mut self) -> error::Result<(Vec<FileMeta>, Vec<FileMeta>)> { @@ -485,6 +518,29 @@ fn get_expired_ssts( .collect() } +#[derive(Debug)] +pub(crate) struct CompactionOutput { + pub output_file_id: FileId, + /// Compaction output file level. + pub output_level: Level, + /// Compaction input files. + pub inputs: Vec<FileHandle>, +} + +/// Builds [BoxedBatchReader] that reads all SST files and yields batches in primary key order. +async fn build_sst_reader( + metadata: RegionMetadataRef, + sst_layer: AccessLayerRef, + inputs: &[FileHandle], +) -> error::Result<BoxedBatchReader> { + SeqScan::new(sst_layer, ProjectionMapper::all(&metadata)?) + .with_files(inputs.to_vec()) + // We ignore file not found error during compaction. 
+ .with_ignore_file_not_found(true) + .build_reader() + .await +} + #[cfg(test)] mod tests { use std::collections::HashSet; diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs index 99b880de7cf2..49c68e489fa3 100644 --- a/src/mito2/src/flush.rs +++ b/src/mito2/src/flush.rs @@ -24,7 +24,8 @@ use store_api::storage::RegionId; use strum::IntoStaticStr; use tokio::sync::mpsc; -use crate::access_layer::AccessLayerRef; +use crate::access_layer::{AccessLayerRef, SstWriteRequest}; +use crate::cache::CacheManagerRef; use crate::config::MitoConfig; use crate::error::{ Error, FlushRegionSnafu, RegionClosedSnafu, RegionDroppedSnafu, RegionTruncatedSnafu, Result, @@ -200,6 +201,7 @@ pub(crate) struct RegionFlushTask { pub(crate) listener: WorkerListener, pub(crate) engine_config: Arc<MitoConfig>, pub(crate) row_group_size: Option<usize>, + pub(crate) cache_manager: CacheManagerRef, } impl RegionFlushTask { @@ -243,6 +245,7 @@ impl RegionFlushTask { async fn do_flush(&mut self, version_data: VersionControlData) { let timer = FLUSH_ELAPSED.with_label_values(&["total"]).start_timer(); self.listener.on_flush_begin(self.region_id).await; + let worker_request = match self.flush_memtables(&version_data.version).await { Ok(file_metas) => { let memtables_to_remove = version_data @@ -252,6 +255,7 @@ impl RegionFlushTask { .iter() .map(|m| m.id()) .collect(); + let flush_finished = FlushFinished { region_id: self.region_id, file_metas, @@ -297,10 +301,10 @@ impl RegionFlushTask { if let Some(row_group_size) = self.row_group_size { write_opts.row_group_size = row_group_size; } + let memtables = version.memtables.immutables(); let mut file_metas = Vec::with_capacity(memtables.len()); let mut flushed_bytes = 0; - for mem in memtables { if mem.is_empty() { // Skip empty memtables. @@ -310,22 +314,32 @@ impl RegionFlushTask { let file_id = FileId::random(); let iter = mem.iter(None, None); let source = Source::Iter(iter); - let mut writer = self + + // Flush to level 0. + let write_request = SstWriteRequest { + file_id, + metadata: version.metadata.clone(), + source, + cache_manager: self.cache_manager.clone(), + storage: version.options.storage.clone(), + }; + let Some(sst_info) = self .access_layer - .write_sst(file_id, version.metadata.clone(), source); - let Some(sst_info) = writer.write_all(&write_opts).await? else { + .write_sst(write_request, &write_opts) + .await? + else { // No data written. 
continue; }; - flushed_bytes += sst_info.file_size; - file_metas.push(FileMeta { - region_id: version.metadata.region_id, + let file_meta = FileMeta { + region_id: self.region_id, file_id, time_range: sst_info.time_range, level: 0, file_size: sst_info.file_size, - }); + }; + file_metas.push(file_meta); } if !file_metas.is_empty() { @@ -334,8 +348,8 @@ impl RegionFlushTask { let file_ids: Vec<_> = file_metas.iter().map(|f| f.file_id).collect(); info!( - "Successfully flush memtables, region: {}, reason: {}, files: {:?}, cost: {:?}", - version.metadata.region_id, + "Successfully flush memtables, region: {}, reason: {}, files: {:?}, cost: {:?}s", + self.region_id, self.reason.as_str(), file_ids, timer.stop_and_record(), @@ -652,6 +666,7 @@ mod tests { use tokio::sync::oneshot; use super::*; + use crate::cache::CacheManager; use crate::test_util::scheduler_util::SchedulerEnv; use crate::test_util::version_util::VersionControlBuilder; @@ -728,6 +743,7 @@ mod tests { listener: WorkerListener::default(), engine_config: Arc::new(MitoConfig::default()), row_group_size: None, + cache_manager: Arc::new(CacheManager::new(0, 0, 0)), }; task.push_sender(OptionOutputTx::from(output_tx)); scheduler diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs index c4415887f386..8fb640399ea7 100644 --- a/src/mito2/src/region/options.rs +++ b/src/mito2/src/region/options.rs @@ -37,7 +37,7 @@ pub struct RegionOptions { pub ttl: Option<Duration>, /// Compaction options. pub compaction: CompactionOptions, - /// Custom storage. + /// Custom storage. Uses default storage if it is `None`. pub storage: Option<String>, /// Wal options. pub wal_options: WalOptions, diff --git a/src/mito2/src/region_write_ctx.rs b/src/mito2/src/region_write_ctx.rs index 7d27e49eaf73..6cf8043174a1 100644 --- a/src/mito2/src/region_write_ctx.rs +++ b/src/mito2/src/region_write_ctx.rs @@ -27,8 +27,6 @@ use crate::region::version::{VersionControlData, VersionControlRef, VersionRef}; use crate::request::OptionOutputTx; use crate::wal::{EntryId, WalWriter}; -/// Context to keep region metadata and buffer write requests. - /// Notifier to notify write result on drop. struct WriteNotify { /// Error to send to the waiter. diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs index 584faf1ab964..20259672e3bb 100644 --- a/src/mito2/src/sst/parquet.rs +++ b/src/mito2/src/sst/parquet.rs @@ -114,8 +114,12 @@ mod tests { ..Default::default() }; - let mut writer = ParquetWriter::new(file_path, metadata, source, object_store.clone()); - let info = writer.write_all(&write_opts).await.unwrap().unwrap(); + let mut writer = ParquetWriter::new(file_path, metadata, object_store.clone()); + let info = writer + .write_all(source, &write_opts) + .await + .unwrap() + .unwrap(); assert_eq!(200, info.num_rows); assert!(info.file_size > 0); assert_eq!( @@ -159,9 +163,12 @@ mod tests { ..Default::default() }; // Prepare data. 
- let mut writer = - ParquetWriter::new(file_path, metadata.clone(), source, object_store.clone()); - writer.write_all(&write_opts).await.unwrap().unwrap(); + let mut writer = ParquetWriter::new(file_path, metadata.clone(), object_store.clone()); + writer + .write_all(source, &write_opts) + .await + .unwrap() + .unwrap(); let cache = Some(Arc::new(CacheManager::new(0, 0, 64 * 1024 * 1024))); let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store) @@ -220,10 +227,9 @@ mod tests { // write the sst file and get sst info // sst info contains the parquet metadata, which is converted from FileMetaData - let mut writer = - ParquetWriter::new(file_path, metadata.clone(), source, object_store.clone()); + let mut writer = ParquetWriter::new(file_path, metadata.clone(), object_store.clone()); let sst_info = writer - .write_all(&write_opts) + .write_all(source, &write_opts) .await .unwrap() .expect("write_all should return sst info"); diff --git a/src/mito2/src/sst/parquet/writer.rs b/src/mito2/src/sst/parquet/writer.rs index febec27c0d36..5d8392b6d58d 100644 --- a/src/mito2/src/sst/parquet/writer.rs +++ b/src/mito2/src/sst/parquet/writer.rs @@ -36,8 +36,6 @@ use crate::sst::parquet::{SstInfo, WriteOptions, PARQUET_METADATA_KEY}; pub struct ParquetWriter { /// SST output file path. file_path: String, - /// Input data source. - source: Source, /// Region metadata of the source and the target SST. metadata: RegionMetadataRef, object_store: ObjectStore, @@ -48,12 +46,10 @@ impl ParquetWriter { pub fn new( file_path: String, metadata: RegionMetadataRef, - source: Source, object_store: ObjectStore, ) -> ParquetWriter { ParquetWriter { file_path, - source, metadata, object_store, } @@ -62,7 +58,11 @@ impl ParquetWriter { /// Iterates source and writes all rows to Parquet file. /// /// Returns the [SstInfo] if the SST is written. - pub async fn write_all(&mut self, opts: &WriteOptions) -> Result<Option<SstInfo>> { + pub async fn write_all( + &mut self, + mut source: Source, + opts: &WriteOptions, + ) -> Result<Option<SstInfo>> { let json = self.metadata.to_json().context(InvalidMetadataSnafu)?; let key_value_meta = KeyValue::new(PARQUET_METADATA_KEY.to_string(), json); @@ -88,7 +88,7 @@ impl ParquetWriter { .context(WriteBufferSnafu)?; let mut stats = SourceStats::default(); - while let Some(batch) = self.source.next_batch().await? { + while let Some(batch) = source.next_batch().await? { stats.update(&batch); let arrow_batch = write_format.convert_batch(&batch)?; diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs index 611371a7ec0b..3cf69c8456ca 100644 --- a/src/mito2/src/test_util/scheduler_util.rs +++ b/src/mito2/src/test_util/scheduler_util.rs @@ -22,6 +22,7 @@ use object_store::ObjectStore; use tokio::sync::mpsc::Sender; use crate::access_layer::{AccessLayer, AccessLayerRef}; +use crate::cache::CacheManager; use crate::compaction::CompactionScheduler; use crate::flush::FlushScheduler; use crate::request::WorkerRequest; @@ -65,7 +66,11 @@ impl SchedulerEnv { ) -> CompactionScheduler { let scheduler = self.get_scheduler(); - CompactionScheduler::new(scheduler, request_sender) + CompactionScheduler::new( + scheduler, + request_sender, + Arc::new(CacheManager::new(0, 0, 0)), + ) } /// Creates a new flush scheduler. 
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs index c969fdd008c1..40bb14a401fd 100644 --- a/src/mito2/src/worker.rs +++ b/src/mito2/src/worker.rs @@ -286,7 +286,11 @@ impl<S: LogStore> WorkerStarter<S> { scheduler: self.scheduler.clone(), write_buffer_manager: self.write_buffer_manager, flush_scheduler: FlushScheduler::new(self.scheduler.clone()), - compaction_scheduler: CompactionScheduler::new(self.scheduler, sender.clone()), + compaction_scheduler: CompactionScheduler::new( + self.scheduler, + sender.clone(), + self.cache_manager.clone(), + ), stalled_requests: StalledRequests::default(), listener: self.listener, cache_manager: self.cache_manager, diff --git a/src/mito2/src/worker/handle_flush.rs b/src/mito2/src/worker/handle_flush.rs index 043cb60cc9d3..c913a7b67612 100644 --- a/src/mito2/src/worker/handle_flush.rs +++ b/src/mito2/src/worker/handle_flush.rs @@ -137,7 +137,6 @@ impl<S> RegionWorkerLoop<S> { row_group_size: Option<usize>, engine_config: Arc<MitoConfig>, ) -> RegionFlushTask { - // TODO(yingwen): metrics for flush requested. RegionFlushTask { region_id: region.region_id, reason, @@ -149,6 +148,7 @@ impl<S> RegionWorkerLoop<S> { listener: self.listener.clone(), engine_config, row_group_size, + cache_manager: self.cache_manager.clone(), } } }
feat
Add WriteCache struct and write SSTs to write cache (#2999)
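The cache-related commits above thread a CacheManager through the flush and compaction paths and populate a parquet-metadata cache immediately after an SST is written, so later reads can skip fetching the footer from object storage. Below is a minimal, self-contained sketch of that write-then-cache pattern; RegionId, FileId, ParquetMetaData, CacheManager and write_sst here are simplified stand-ins for illustration, not the real mito2 types.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Simplified stand-ins for the real identifier and metadata types.
type RegionId = u64;
type FileId = u64;

#[derive(Debug)]
struct ParquetMetaData {
    num_rows: usize,
}

// A toy metadata cache keyed by (region, file), mirroring put_parquet_meta_data().
#[derive(Default)]
struct CacheManager {
    metas: Mutex<HashMap<(RegionId, FileId), Arc<ParquetMetaData>>>,
}

impl CacheManager {
    fn put_parquet_meta_data(&self, region: RegionId, file: FileId, meta: Arc<ParquetMetaData>) {
        self.metas.lock().unwrap().insert((region, file), meta);
    }

    fn get_parquet_meta_data(&self, region: RegionId, file: FileId) -> Option<Arc<ParquetMetaData>> {
        self.metas.lock().unwrap().get(&(region, file)).cloned()
    }
}

// Pretend SST writer: "writes" a file and returns the metadata it produced,
// or None when the source (e.g. an empty memtable) yields no rows.
fn write_sst(_region: RegionId, _file: FileId, rows: usize) -> Option<ParquetMetaData> {
    if rows == 0 {
        return None;
    }
    Some(ParquetMetaData { num_rows: rows })
}

fn main() {
    let cache = Arc::new(CacheManager::default());
    let (region, file) = (1, 42);

    // Write the SST first; only populate the cache when something was written.
    if let Some(meta) = write_sst(region, file, 200) {
        cache.put_parquet_meta_data(region, file, Arc::new(meta));
    }

    assert_eq!(cache.get_parquet_meta_data(region, file).unwrap().num_rows, 200);
}

Keeping the cache population next to the write keeps the cache warm for the file that was just flushed or compacted, which is the design choice the diff above encodes.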
0967678a518bd8dc4756c6a2c3169e0e62a47659
2023-08-16 07:23:11
dennis zhuang
feat: don't enable telemetry for debug building (#2177)
false
diff --git a/src/datanode/src/greptimedb_telemetry.rs b/src/datanode/src/greptimedb_telemetry.rs index e05be6c2ac89..a8043ea9a2d6 100644 --- a/src/datanode/src/greptimedb_telemetry.rs +++ b/src/datanode/src/greptimedb_telemetry.rs @@ -57,7 +57,7 @@ pub async fn get_greptimedb_telemetry_task( mode: &Mode, enable: bool, ) -> Arc<GreptimeDBTelemetryTask> { - if !enable || cfg!(test) { + if !enable || cfg!(test) || cfg!(debug_assertions) { return Arc::new(GreptimeDBTelemetryTask::disable()); } diff --git a/src/meta-srv/src/greptimedb_telemetry.rs b/src/meta-srv/src/greptimedb_telemetry.rs index ea0685a53fc3..043a8de4f9d5 100644 --- a/src/meta-srv/src/greptimedb_telemetry.rs +++ b/src/meta-srv/src/greptimedb_telemetry.rs @@ -60,7 +60,7 @@ pub async fn get_greptimedb_telemetry_task( meta_peer_client: MetaPeerClientRef, enable: bool, ) -> Arc<GreptimeDBTelemetryTask> { - if !enable || cfg!(test) { + if !enable || cfg!(test) || cfg!(debug_assertions) { return Arc::new(GreptimeDBTelemetryTask::disable()); }
feat
don't enable telemetry for debug building (#2177)
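The telemetry change relies on cfg!(debug_assertions), a compile-time boolean that is true for the default dev profile and false for release builds unless the profile overrides it. A small sketch of the same gating follows, with a hypothetical TelemetryTask standing in for GreptimeDBTelemetryTask.

// Disable a background feature when the user opts out, in tests, or in debug builds.
enum TelemetryTask {
    Enabled,
    Disabled,
}

fn get_telemetry_task(enable: bool) -> TelemetryTask {
    if !enable || cfg!(test) || cfg!(debug_assertions) {
        return TelemetryTask::Disabled;
    }
    TelemetryTask::Enabled
}

fn main() {
    match get_telemetry_task(true) {
        TelemetryTask::Enabled => println!("telemetry reporting enabled"),
        TelemetryTask::Disabled => println!("telemetry reporting disabled"),
    }
}

Because the check is a compile-time constant combined with runtime flags, debug binaries never start the reporting task even when the configuration asks for it.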
af0c4c068a544147616d3f0decf4365e6fb67b0f
2024-01-09 09:14:00
Ruihang Xia
feat: support PromQL function `vector` (#3036)
false
diff --git a/Cargo.lock b/Cargo.lock index 519f59c33dfc..a5e188f70c17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6558,6 +6558,7 @@ dependencies = [ "common-catalog", "common-error", "common-macro", + "common-query", "common-recordbatch", "common-telemetry", "datafusion", diff --git a/src/common/query/src/prelude.rs b/src/common/query/src/prelude.rs index e65586e669e5..b334edb9030c 100644 --- a/src/common/query/src/prelude.rs +++ b/src/common/query/src/prelude.rs @@ -18,3 +18,10 @@ pub use crate::columnar_value::ColumnarValue; pub use crate::function::*; pub use crate::logical_plan::{create_udf, AggregateFunction, Expr, ScalarUdf}; pub use crate::signature::{Signature, TypeSignature, Volatility}; + +/// Default timestamp column name for Prometheus metrics. +pub const GREPTIME_TIMESTAMP: &str = "greptime_timestamp"; +/// Default value column name for Prometheus metrics. +pub const GREPTIME_VALUE: &str = "greptime_value"; +/// Default counter column name for OTLP metrics. +pub const GREPTIME_COUNT: &str = "greptime_count"; diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml index 6be12de4e343..fc8533656746 100644 --- a/src/promql/Cargo.toml +++ b/src/promql/Cargo.toml @@ -13,6 +13,7 @@ catalog.workspace = true common-catalog.workspace = true common-error.workspace = true common-macro.workspace = true +common-query.workspace = true common-recordbatch.workspace = true common-telemetry.workspace = true datafusion.workspace = true diff --git a/src/promql/src/extension_plan/empty_metric.rs b/src/promql/src/extension_plan/empty_metric.rs index f4c33cf30f63..74d25d8ebb8f 100644 --- a/src/promql/src/extension_plan/empty_metric.rs +++ b/src/promql/src/extension_plan/empty_metric.rs @@ -104,8 +104,8 @@ impl EmptyMetric { .map(|expr| { physical_planner.create_physical_expr( expr, - &self.result_schema, - &ArrowSchema::from(self.result_schema.as_ref()), + &self.time_index_schema, + &ArrowSchema::from(self.time_index_schema.as_ref()), session_state, ) }) diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs index 7c8176d7b95e..3f3a5bd07c8c 100644 --- a/src/promql/src/planner.rs +++ b/src/promql/src/planner.rs @@ -19,6 +19,7 @@ use std::time::UNIX_EPOCH; use async_recursion::async_recursion; use catalog::table_source::DfTableSourceProvider; +use common_query::prelude::GREPTIME_VALUE; use datafusion::common::{DFSchemaRef, OwnedTableReference, Result as DfResult}; use datafusion::datasource::DefaultTableSource; use datafusion::logical_expr::expr::{AggregateFunction, Alias, ScalarFunction, ScalarUDF}; @@ -36,8 +37,9 @@ use datatypes::arrow::datatypes::DataType as ArrowDataType; use promql_parser::label::{MatchOp, Matcher, Matchers, METRIC_NAME}; use promql_parser::parser::{ token, AggregateExpr, BinModifier, BinaryExpr as PromBinaryExpr, Call, EvalStmt, - Expr as PromExpr, Function, LabelModifier, MatrixSelector, NumberLiteral, Offset, ParenExpr, - StringLiteral, SubqueryExpr, TokenType, UnaryExpr, VectorMatchCardinality, VectorSelector, + Expr as PromExpr, Function, FunctionArgs as PromFunctionArgs, LabelModifier, MatrixSelector, + NumberLiteral, Offset, ParenExpr, StringLiteral, SubqueryExpr, TokenType, UnaryExpr, + VectorMatchCardinality, VectorSelector, }; use snafu::{ensure, OptionExt, ResultExt}; use table::table::adapter::DfTableProviderAdapter; @@ -63,6 +65,8 @@ use crate::functions::{ const SPECIAL_TIME_FUNCTION: &str = "time"; /// `histogram_quantile` function in PromQL const SPECIAL_HISTOGRAM_QUANTILE: &str = "histogram_quantile"; +/// `vector` function in PromQL +const 
SPECIAL_VECTOR_FUNCTION: &str = "vector"; /// `le` column for conventional histogram. const LE_COLUMN_NAME: &str = "le"; @@ -289,13 +293,13 @@ impl PromPlanner { let left_field_columns = self.ctx.field_columns.clone(); let left_table_ref: OwnedTableReference = self.ctx.table_name.clone().unwrap_or_default().into(); - let left_tag_cols = self.ctx.tag_columns.clone(); + let left_context = self.ctx.clone(); let right_input = self.prom_expr_to_plan(*rhs.clone()).await?; let right_field_columns = self.ctx.field_columns.clone(); let right_table_ref: OwnedTableReference = self.ctx.table_name.clone().unwrap_or_default().into(); - let right_tag_cols = self.ctx.tag_columns.clone(); + let right_context = self.ctx.clone(); // TODO(ruihang): avoid join if left and right are the same table @@ -304,8 +308,8 @@ impl PromPlanner { return self.set_op_on_non_field_columns( left_input, right_input, - left_tag_cols, - right_tag_cols, + left_context, + right_context, *op, modifier, ); @@ -463,57 +467,14 @@ impl PromPlanner { }) } PromExpr::Call(Call { func, args }) => { - if func.name == SPECIAL_HISTOGRAM_QUANTILE { - if args.args.len() != 2 { - return FunctionInvalidArgumentSnafu { - fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(), - } - .fail(); - } - let phi = Self::try_build_float_literal(&args.args[0]).with_context(|| { - FunctionInvalidArgumentSnafu { - fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(), - } - })?; - let input = args.args[1].as_ref().clone(); - let input_plan = self.prom_expr_to_plan(input).await?; - - if !self.ctx.has_le_tag() { - return ColumnNotFoundSnafu { - col: LE_COLUMN_NAME.to_string(), - } - .fail(); - } - let time_index_column = - self.ctx.time_index_column.clone().with_context(|| { - TimeIndexNotFoundSnafu { - table: self.ctx.table_name.clone().unwrap_or_default(), - } - })?; - // FIXME(ruihang): support multi fields - let field_column = self - .ctx - .field_columns - .first() - .with_context(|| FunctionInvalidArgumentSnafu { - fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(), - })? - .clone(); - - return Ok(LogicalPlan::Extension(Extension { - node: Arc::new( - HistogramFold::new( - LE_COLUMN_NAME.to_string(), - field_column, - time_index_column, - phi, - input_plan, - ) - .context(DataFusionPlanningSnafu)?, - ), - })); + // some special functions that are not expression but a plan + match func.name { + SPECIAL_HISTOGRAM_QUANTILE => return self.create_histogram_plan(args).await, + SPECIAL_VECTOR_FUNCTION => return self.create_vector_plan(args).await, + _ => {} } + // transform function arguments let args = self.create_function_args(&args.args)?; let input = if let Some(prom_expr) = args.input { self.prom_expr_to_plan(prom_expr).await? @@ -1303,6 +1264,93 @@ impl PromPlanner { Ok(exprs) } + /// Create a [SPECIAL_HISTOGRAM_QUANTILE] plan. 
+ async fn create_histogram_plan(&mut self, args: &PromFunctionArgs) -> Result<LogicalPlan> { + if args.args.len() != 2 { + return FunctionInvalidArgumentSnafu { + fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(), + } + .fail(); + } + let phi = Self::try_build_float_literal(&args.args[0]).with_context(|| { + FunctionInvalidArgumentSnafu { + fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(), + } + })?; + let input = args.args[1].as_ref().clone(); + let input_plan = self.prom_expr_to_plan(input).await?; + + if !self.ctx.has_le_tag() { + return ColumnNotFoundSnafu { + col: LE_COLUMN_NAME.to_string(), + } + .fail(); + } + let time_index_column = + self.ctx + .time_index_column + .clone() + .with_context(|| TimeIndexNotFoundSnafu { + table: self.ctx.table_name.clone().unwrap_or_default(), + })?; + // FIXME(ruihang): support multi fields + let field_column = self + .ctx + .field_columns + .first() + .with_context(|| FunctionInvalidArgumentSnafu { + fn_name: SPECIAL_HISTOGRAM_QUANTILE.to_string(), + })? + .clone(); + + Ok(LogicalPlan::Extension(Extension { + node: Arc::new( + HistogramFold::new( + LE_COLUMN_NAME.to_string(), + field_column, + time_index_column, + phi, + input_plan, + ) + .context(DataFusionPlanningSnafu)?, + ), + })) + } + + /// Create a [SPECIAL_VECTOR_FUNCTION] plan + async fn create_vector_plan(&mut self, args: &PromFunctionArgs) -> Result<LogicalPlan> { + if args.args.len() != 1 { + return FunctionInvalidArgumentSnafu { + fn_name: SPECIAL_VECTOR_FUNCTION.to_string(), + } + .fail(); + } + let lit = Self::try_build_float_literal(&args.args[0]).with_context(|| { + FunctionInvalidArgumentSnafu { + fn_name: SPECIAL_VECTOR_FUNCTION.to_string(), + } + })?; + + // reuse `SPECIAL_TIME_FUNCTION` as name of time index column + self.ctx.time_index_column = Some(SPECIAL_TIME_FUNCTION.to_string()); + self.ctx.table_name = Some(String::new()); + self.ctx.tag_columns = vec![]; + self.ctx.field_columns = vec![GREPTIME_VALUE.to_string()]; + Ok(LogicalPlan::Extension(Extension { + node: Arc::new( + EmptyMetric::new( + self.ctx.start, + self.ctx.end, + self.ctx.interval, + SPECIAL_TIME_FUNCTION.to_string(), + GREPTIME_VALUE.to_string(), + Some(DfExpr::Literal(ScalarValue::Float64(Some(lit)))), + ) + .context(DataFusionPlanningSnafu)?, + ), + })) + } + /// Try to build a DataFusion Literal Expression from PromQL Expr, return /// `None` if the input is not a literal expression. 
fn try_build_literal_expr(expr: &PromExpr) -> Option<DfExpr> { @@ -1491,19 +1539,35 @@ impl PromPlanner { /// Build a set operator (AND/OR/UNLESS) fn set_op_on_non_field_columns( - &self, + &mut self, left: LogicalPlan, right: LogicalPlan, - left_tag_cols: Vec<String>, - right_tag_cols: Vec<String>, + left_context: PromPlannerContext, + right_context: PromPlannerContext, op: TokenType, modifier: &Option<BinModifier>, ) -> Result<LogicalPlan> { - let mut left_tag_col_set = left_tag_cols.into_iter().collect::<HashSet<_>>(); - let mut right_tag_col_set = right_tag_cols.into_iter().collect::<HashSet<_>>(); + let mut left_tag_col_set = left_context + .tag_columns + .iter() + .cloned() + .collect::<HashSet<_>>(); + let mut right_tag_col_set = right_context + .tag_columns + .iter() + .cloned() + .collect::<HashSet<_>>(); if matches!(op.id(), token::T_LOR) { - return self.or_operator(left, right, left_tag_col_set, right_tag_col_set, modifier); + return self.or_operator( + left, + right, + left_tag_col_set, + right_tag_col_set, + left_context, + right_context, + modifier, + ); } // apply modifier @@ -1585,19 +1649,24 @@ impl PromPlanner { .build() .context(DataFusionPlanningSnafu), token::T_LOR => { - self.or_operator(left, right, left_tag_col_set, right_tag_col_set, modifier) + // OR is handled at the beginning of this function, as it cannot + // be expressed using JOIN like AND and UNLESS. + unreachable!() } _ => UnexpectedTokenSnafu { token: op }.fail(), } } // TODO(ruihang): change function name + #[allow(clippy::too_many_arguments)] fn or_operator( - &self, + &mut self, left: LogicalPlan, right: LogicalPlan, left_tag_cols_set: HashSet<String>, right_tag_cols_set: HashSet<String>, + left_context: PromPlannerContext, + right_context: PromPlannerContext, modifier: &Option<BinModifier>, ) -> Result<LogicalPlan> { // prepare hash sets @@ -1623,18 +1692,37 @@ impl PromPlanner { .as_ref() .map(|r| r.to_string()) .unwrap_or_default(); + let left_time_index_column = + left_context + .time_index_column + .clone() + .with_context(|| TimeIndexNotFoundSnafu { + table: left_qualifier_string.clone(), + })?; + let right_time_index_column = + right_context + .time_index_column + .clone() + .with_context(|| TimeIndexNotFoundSnafu { + table: right_qualifier_string.clone(), + })?; // step 0: fill all columns in output schema - let all_columns_set = left + let mut all_columns_set = left .schema() .fields() .iter() .chain(right.schema().fields().iter()) .map(|field| field.name().clone()) .collect::<HashSet<_>>(); + // remove time index column + all_columns_set.remove(&left_time_index_column); + all_columns_set.remove(&right_time_index_column); let mut all_columns = all_columns_set.into_iter().collect::<Vec<_>>(); // sort to ensure the generated schema is not volatile all_columns.sort_unstable(); + // use left time index column name as the result time index column name + all_columns.insert(0, left_time_index_column.clone()); // step 1: align schema using project, fill non-exist columns with null let left_proj_exprs = all_columns.iter().map(|col| { @@ -1644,13 +1732,21 @@ impl PromPlanner { DfExpr::Column(Column::new(left_qualifier.clone(), col)) } }); - let right_proj_exprs = all_columns.iter().map(|col| { + let right_time_index_expr = DfExpr::Column(Column::new( + right_qualifier.clone(), + right_time_index_column, + )) + .alias(left_time_index_column.clone()); + let right_proj_exprs_without_time_index = all_columns.iter().skip(1).map(|col| { if tags_not_in_right.contains(col) { 
DfExpr::Literal(ScalarValue::Utf8(None)).alias(col.to_string()) } else { DfExpr::Column(Column::new(right_qualifier.clone(), col)) } }); + let right_proj_exprs = [right_time_index_expr] + .into_iter() + .chain(right_proj_exprs_without_time_index); let left_projected = LogicalPlanBuilder::from(left) .project(left_proj_exprs) @@ -1691,13 +1787,17 @@ impl PromPlanner { left_projected, right_projected, match_columns, - self.ctx.time_index_column.clone().unwrap(), + left_time_index_column.clone(), schema, ); let result = LogicalPlan::Extension(Extension { node: Arc::new(union_distinct_on), }); + // step 4: update context + self.ctx.time_index_column = Some(left_time_index_column); + self.ctx.tag_columns = all_tags.into_iter().collect(); + Ok(result) } diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs index 869cb883ef96..28e803f48e73 100644 --- a/src/query/src/sql.rs +++ b/src/query/src/sql.rs @@ -25,6 +25,7 @@ use common_datasource::file_format::{infer_schemas, FileFormat, Format}; use common_datasource::lister::{Lister, Source}; use common_datasource::object_store::build_backend; use common_datasource::util::find_dir_and_filename; +use common_query::prelude::GREPTIME_TIMESTAMP; use common_query::Output; use common_recordbatch::{RecordBatch, RecordBatches}; use common_time::Timestamp; @@ -57,8 +58,6 @@ const NULLABLE_YES: &str = "YES"; const NULLABLE_NO: &str = "NO"; const PRI_KEY: &str = "PRI"; -const GREPTIME_TIMESTAMP: &str = "greptime_timestamp"; - static DESCRIBE_TABLE_OUTPUT_SCHEMA: Lazy<Arc<Schema>> = Lazy::new(|| { Arc::new(Schema::new(vec![ ColumnSchema::new( diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs index 2aae0b3a0009..ee8939744ff8 100644 --- a/src/servers/src/http/prometheus.rs +++ b/src/servers/src/http/prometheus.rs @@ -22,6 +22,7 @@ use common_catalog::consts::DEFAULT_SCHEMA_NAME; use common_catalog::parse_catalog_and_schema_from_db_string; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; +use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_query::Output; use common_recordbatch::RecordBatches; use common_time::util::{current_time_rfc3339, yesterday_rfc3339}; @@ -43,7 +44,7 @@ use snafu::{Location, OptionExt, ResultExt}; use crate::error::{ CollectRecordbatchSnafu, Error, InternalSnafu, InvalidQuerySnafu, Result, UnexpectedResultSnafu, }; -use crate::prom_store::{FIELD_COLUMN_NAME, METRIC_NAME_LABEL, TIMESTAMP_COLUMN_NAME}; +use crate::prom_store::METRIC_NAME_LABEL; use crate::prometheus_handler::PrometheusHandlerRef; #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)] @@ -499,8 +500,8 @@ pub async fn labels_query( } } - let _ = labels.remove(TIMESTAMP_COLUMN_NAME); - let _ = labels.remove(FIELD_COLUMN_NAME); + let _ = labels.remove(GREPTIME_TIMESTAMP); + let _ = labels.remove(GREPTIME_VALUE); let mut sorted_labels: Vec<String> = labels.into_iter().collect(); sorted_labels.sort(); diff --git a/src/servers/src/opentsdb.rs b/src/servers/src/opentsdb.rs index 07cde1e14765..8f0ee6300fab 100644 --- a/src/servers/src/opentsdb.rs +++ b/src/servers/src/opentsdb.rs @@ -22,6 +22,7 @@ use std::sync::Arc; use api::v1::RowInsertRequests; use async_trait::async_trait; +use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_runtime::Runtime; use common_telemetry::logging::error; use futures::StreamExt; @@ -31,7 +32,6 @@ use self::codec::DataPoint; use crate::error::Result; use crate::opentsdb::connection::Connection; use crate::opentsdb::handler::Handler; 
-use crate::prom_store::{FIELD_COLUMN_NAME, TIMESTAMP_COLUMN_NAME}; use crate::query_handler::OpentsdbProtocolHandlerRef; use crate::row_writer::{self, MultiTableData}; use crate::server::{AbortableStream, BaseTcpServer, Server}; @@ -151,11 +151,11 @@ pub fn data_point_to_grpc_row_insert_requests( row_writer::write_tags(table_data, tags.into_iter(), &mut one_row)?; // value - row_writer::write_f64(table_data, FIELD_COLUMN_NAME, value, &mut one_row)?; + row_writer::write_f64(table_data, GREPTIME_VALUE, value, &mut one_row)?; // timestamp row_writer::write_ts_millis( table_data, - TIMESTAMP_COLUMN_NAME, + GREPTIME_TIMESTAMP, Some(timestamp), &mut one_row, )?; diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs index 55e160460554..ae7b8354cec0 100644 --- a/src/servers/src/opentsdb/codec.rs +++ b/src/servers/src/opentsdb/codec.rs @@ -13,12 +13,10 @@ // limitations under the License. use api::v1::{column, Column, ColumnDataType, InsertRequest as GrpcInsertRequest, SemanticType}; +use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use crate::error::{self, Result}; -pub const OPENTSDB_TIMESTAMP_COLUMN_NAME: &str = "greptime_timestamp"; -pub const OPENTSDB_FIELD_COLUMN_NAME: &str = "greptime_value"; - #[derive(Debug, Clone)] pub struct DataPoint { metric: String, @@ -131,7 +129,7 @@ impl DataPoint { let mut columns = Vec::with_capacity(2 + self.tags.len()); let ts_column = Column { - column_name: OPENTSDB_TIMESTAMP_COLUMN_NAME.to_string(), + column_name: GREPTIME_TIMESTAMP.to_string(), values: Some(column::Values { timestamp_millisecond_values: vec![self.ts_millis], ..Default::default() @@ -143,7 +141,7 @@ impl DataPoint { columns.push(ts_column); let field_column = Column { - column_name: OPENTSDB_FIELD_COLUMN_NAME.to_string(), + column_name: GREPTIME_VALUE.to_string(), values: Some(column::Values { f64_values: vec![self.value], ..Default::default() @@ -269,7 +267,7 @@ mod test { assert_eq!(row_count, 1); assert_eq!(columns.len(), 4); - assert_eq!(columns[0].column_name, OPENTSDB_TIMESTAMP_COLUMN_NAME); + assert_eq!(columns[0].column_name, GREPTIME_TIMESTAMP); assert_eq!( columns[0] .values @@ -279,7 +277,7 @@ mod test { vec![1000] ); - assert_eq!(columns[1].column_name, OPENTSDB_FIELD_COLUMN_NAME); + assert_eq!(columns[1].column_name, GREPTIME_VALUE); assert_eq!(columns[1].values.as_ref().unwrap().f64_values, vec![1.0]); assert_eq!(columns[2].column_name, "tagk1"); diff --git a/src/servers/src/otlp.rs b/src/servers/src/otlp.rs index 0f35a7b39ef0..5b92d12cb629 100644 --- a/src/servers/src/otlp.rs +++ b/src/servers/src/otlp.rs @@ -15,7 +15,3 @@ pub mod metrics; pub mod plugin; pub mod trace; - -const GREPTIME_TIMESTAMP: &str = "greptime_timestamp"; -const GREPTIME_VALUE: &str = "greptime_value"; -const GREPTIME_COUNT: &str = "greptime_count"; diff --git a/src/servers/src/otlp/metrics.rs b/src/servers/src/otlp/metrics.rs index cd7bbc7db81a..fd8dcfc3292c 100644 --- a/src/servers/src/otlp/metrics.rs +++ b/src/servers/src/otlp/metrics.rs @@ -14,11 +14,11 @@ use api::v1::{RowInsertRequests, Value}; use common_grpc::writer::Precision; +use common_query::prelude::{GREPTIME_COUNT, GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest; use opentelemetry_proto::tonic::common::v1::{any_value, KeyValue}; use opentelemetry_proto::tonic::metrics::v1::{metric, number_data_point, *}; -use super::{GREPTIME_COUNT, GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use crate::error::Result; use crate::row_writer::{self, 
MultiTableData, TableData}; diff --git a/src/servers/src/otlp/trace.rs b/src/servers/src/otlp/trace.rs index 16de53f5656d..88a5d9b35074 100644 --- a/src/servers/src/otlp/trace.rs +++ b/src/servers/src/otlp/trace.rs @@ -15,10 +15,10 @@ use api::v1::value::ValueData; use api::v1::{ColumnDataType, RowInsertRequests}; use common_grpc::writer::Precision; +use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; use self::span::{parse_span, TraceSpan, TraceSpans}; -use super::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use crate::error::Result; use crate::row_writer::{self, MultiTableData, TableData}; diff --git a/src/servers/src/prom_store.rs b/src/servers/src/prom_store.rs index 15183e217a42..52591678b67e 100644 --- a/src/servers/src/prom_store.rs +++ b/src/servers/src/prom_store.rs @@ -21,6 +21,7 @@ use std::hash::{Hash, Hasher}; use api::prom_store::remote::label_matcher::Type as MatcherType; use api::prom_store::remote::{Label, Query, Sample, TimeSeries, WriteRequest}; use api::v1::RowInsertRequests; +use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_recordbatch::{RecordBatch, RecordBatches}; use common_time::timestamp::TimeUnit; use datafusion::prelude::{col, lit, regexp_match, Expr}; @@ -35,8 +36,6 @@ use snap::raw::{Decoder, Encoder}; use crate::error::{self, Result}; use crate::row_writer::{self, MultiTableData}; -pub const TIMESTAMP_COLUMN_NAME: &str = "greptime_timestamp"; -pub const FIELD_COLUMN_NAME: &str = "greptime_value"; pub const METRIC_NAME_LABEL: &str = "__name__"; /// Metrics for push gateway protocol @@ -73,9 +72,8 @@ pub fn query_to_plan(dataframe: DataFrame, q: &Query) -> Result<LogicalPlan> { let mut conditions = Vec::with_capacity(label_matches.len() + 1); - conditions - .push(col(TIMESTAMP_COLUMN_NAME).gt_eq(lit_timestamp_millisecond(start_timestamp_ms))); - conditions.push(col(TIMESTAMP_COLUMN_NAME).lt_eq(lit_timestamp_millisecond(end_timestamp_ms))); + conditions.push(col(GREPTIME_TIMESTAMP).gt_eq(lit_timestamp_millisecond(start_timestamp_ms))); + conditions.push(col(GREPTIME_TIMESTAMP).lt_eq(lit_timestamp_millisecond(end_timestamp_ms))); for m in label_matches { let name = &m.name; @@ -204,9 +202,7 @@ fn collect_timeseries_ids(table_name: &str, recordbatch: &RecordBatch) -> Vec<Ti )); for (i, column_schema) in recordbatch.schema.column_schemas().iter().enumerate() { - if column_schema.name == FIELD_COLUMN_NAME - || column_schema.name == TIMESTAMP_COLUMN_NAME - { + if column_schema.name == GREPTIME_VALUE || column_schema.name == GREPTIME_TIMESTAMP { continue; } @@ -239,7 +235,7 @@ pub fn recordbatches_to_timeseries( } fn recordbatch_to_timeseries(table: &str, recordbatch: RecordBatch) -> Result<Vec<TimeSeries>> { - let ts_column = recordbatch.column_by_name(TIMESTAMP_COLUMN_NAME).context( + let ts_column = recordbatch.column_by_name(GREPTIME_TIMESTAMP).context( error::InvalidPromRemoteReadQueryResultSnafu { msg: "missing greptime_timestamp column in query result", }, @@ -254,7 +250,7 @@ fn recordbatch_to_timeseries(table: &str, recordbatch: RecordBatch) -> Result<Ve } ); - let field_column = recordbatch.column_by_name(FIELD_COLUMN_NAME).context( + let field_column = recordbatch.column_by_name(GREPTIME_VALUE).context( error::InvalidPromRemoteReadQueryResultSnafu { msg: "missing greptime_value column in query result", }, @@ -341,11 +337,11 @@ pub fn to_grpc_row_insert_requests(request: WriteRequest) -> Result<(RowInsertRe }); row_writer::write_tags(table_data, 
kvs, &mut one_row)?; // value - row_writer::write_f64(table_data, FIELD_COLUMN_NAME, *value, &mut one_row)?; + row_writer::write_f64(table_data, GREPTIME_VALUE, *value, &mut one_row)?; // timestamp row_writer::write_ts_millis( table_data, - TIMESTAMP_COLUMN_NAME, + GREPTIME_TIMESTAMP, Some(*timestamp), &mut one_row, )?; @@ -494,15 +490,11 @@ mod tests { let schema = Arc::new(Schema::new(vec![ ColumnSchema::new( - TIMESTAMP_COLUMN_NAME, + GREPTIME_TIMESTAMP, ConcreteDataType::timestamp_millisecond_datatype(), true, ), - ColumnSchema::new( - FIELD_COLUMN_NAME, - ConcreteDataType::float64_datatype(), - true, - ), + ColumnSchema::new(GREPTIME_VALUE, ConcreteDataType::float64_datatype(), true), ColumnSchema::new("instance", ConcreteDataType::string_datatype(), true), ColumnSchema::new("job", ConcreteDataType::string_datatype(), true), ])); @@ -701,15 +693,11 @@ mod tests { fn test_recordbatches_to_timeseries() { let schema = Arc::new(Schema::new(vec![ ColumnSchema::new( - TIMESTAMP_COLUMN_NAME, + GREPTIME_TIMESTAMP, ConcreteDataType::timestamp_millisecond_datatype(), true, ), - ColumnSchema::new( - FIELD_COLUMN_NAME, - ConcreteDataType::float64_datatype(), - true, - ), + ColumnSchema::new(GREPTIME_VALUE, ConcreteDataType::float64_datatype(), true), ColumnSchema::new("instance", ConcreteDataType::string_datatype(), true), ])); diff --git a/tests/cases/standalone/common/promql/set_operation.result b/tests/cases/standalone/common/promql/set_operation.result index 15a7a865a317..31a0a6638fd7 100644 --- a/tests/cases/standalone/common/promql/set_operation.result +++ b/tests/cases/standalone/common/promql/set_operation.result @@ -6,7 +6,7 @@ create table http_requests ( job string, instance string, g string, -- for `group` - val double, + greptime_value double, primary key (job, instance, g) ); @@ -32,7 +32,7 @@ Affected Rows: 0 create table vector_matching_a( ts timestamp time index, l string primary key, - val double, + greptime_value double, ); Affected Rows: 0 @@ -49,12 +49,12 @@ Affected Rows: 2 -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') http_requests{g="canary"} and http_requests{instance="0"}; -+---------------------+-----+----------+--------+-------+ -| ts | job | instance | g | val | -+---------------------+-----+----------+--------+-------+ -| 1970-01-01T00:50:00 | api | 0 | canary | 300.0 | -| 1970-01-01T00:50:00 | app | 0 | canary | 700.0 | -+---------------------+-----+----------+--------+-------+ ++---------------------+-----+----------+--------+----------------+ +| ts | job | instance | g | greptime_value | ++---------------------+-----+----------+--------+----------------+ +| 1970-01-01T00:50:00 | api | 0 | canary | 300.0 | +| 1970-01-01T00:50:00 | app | 0 | canary | 700.0 | ++---------------------+-----+----------+--------+----------------+ -- eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"} -- {group="canary", instance="0", job="api-server"} 301 @@ -62,12 +62,12 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} and http_requests{instance -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and http_requests{instance="0"}; -+-----+----------+--------+---------------------+------------------+ -| job | instance | g | ts | val + Float64(1) | -+-----+----------+--------+---------------------+------------------+ -| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | -| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | -+-----+----------+--------+---------------------+------------------+ 
++-----+----------+--------+---------------------+-----------------------------+ +| job | instance | g | ts | greptime_value + Float64(1) | ++-----+----------+--------+---------------------+-----------------------------+ +| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | +| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | ++-----+----------+--------+---------------------+-----------------------------+ -- eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"} -- {group="canary", instance="0", job="api-server"} 301 @@ -75,12 +75,12 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and http_requests{in -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and on(instance, job) http_requests{instance="0", g="production"}; -+-----+----------+--------+---------------------+------------------+ -| job | instance | g | ts | val + Float64(1) | -+-----+----------+--------+---------------------+------------------+ -| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | -| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | -+-----+----------+--------+---------------------+------------------+ ++-----+----------+--------+---------------------+-----------------------------+ +| job | instance | g | ts | greptime_value + Float64(1) | ++-----+----------+--------+---------------------+-----------------------------+ +| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | +| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | ++-----+----------+--------+---------------------+-----------------------------+ -- eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"} -- {group="canary", instance="0", job="api-server"} 301 @@ -88,12 +88,12 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and on(instance, job -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and on(instance) http_requests{instance="0", g="production"}; -+-----+----------+--------+---------------------+------------------+ -| job | instance | g | ts | val + Float64(1) | -+-----+----------+--------+---------------------+------------------+ -| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | -| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | -+-----+----------+--------+---------------------+------------------+ ++-----+----------+--------+---------------------+-----------------------------+ +| job | instance | g | ts | greptime_value + Float64(1) | ++-----+----------+--------+---------------------+-----------------------------+ +| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | +| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | ++-----+----------+--------+---------------------+-----------------------------+ -- eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group) http_requests{instance="0", group="production"} -- {group="canary", instance="0", job="api-server"} 301 @@ -101,12 +101,12 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and on(instance) htt -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and ignoring(g) http_requests{instance="0", g="production"}; -+-----+----------+--------+---------------------+------------------+ -| job | instance | g | ts | val + Float64(1) | -+-----+----------+--------+---------------------+------------------+ -| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | -| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | 
-+-----+----------+--------+---------------------+------------------+ ++-----+----------+--------+---------------------+-----------------------------+ +| job | instance | g | ts | greptime_value + Float64(1) | ++-----+----------+--------+---------------------+-----------------------------+ +| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | +| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | ++-----+----------+--------+---------------------+-----------------------------+ -- eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group, job) http_requests{instance="0", group="production"} -- {group="canary", instance="0", job="api-server"} 301 @@ -114,12 +114,12 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and ignoring(g) http -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and ignoring(g, job) http_requests{instance="0", g="production"}; -+-----+----------+--------+---------------------+------------------+ -| job | instance | g | ts | val + Float64(1) | -+-----+----------+--------+---------------------+------------------+ -| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | -| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | -+-----+----------+--------+---------------------+------------------+ ++-----+----------+--------+---------------------+-----------------------------+ +| job | instance | g | ts | greptime_value + Float64(1) | ++-----+----------+--------+---------------------+-----------------------------+ +| api | 0 | canary | 1970-01-01T00:50:00 | 301.0 | +| app | 0 | canary | 1970-01-01T00:50:00 | 701.0 | ++-----+----------+--------+---------------------+-----------------------------+ -- eval instant at 50m http_requests{group="canary"} or http_requests{group="production"} -- http_requests{group="canary", instance="0", job="api-server"} 300 @@ -133,18 +133,18 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and ignoring(g, job) -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') http_requests{g="canary"} or http_requests{g="production"}; -+------------+----------+-----+---------------------+-------+ -| g | instance | job | ts | val | -+------------+----------+-----+---------------------+-------+ -| canary | 0 | api | 1970-01-01T00:50:00 | 300.0 | -| canary | 0 | app | 1970-01-01T00:50:00 | 700.0 | -| canary | 1 | api | 1970-01-01T00:50:00 | 400.0 | -| canary | 1 | app | 1970-01-01T00:50:00 | 800.0 | -| production | 0 | api | 1970-01-01T00:50:00 | 100.0 | -| production | 0 | app | 1970-01-01T00:50:00 | 500.0 | -| production | 1 | api | 1970-01-01T00:50:00 | 200.0 | -| production | 1 | app | 1970-01-01T00:50:00 | 600.0 | -+------------+----------+-----+---------------------+-------+ ++---------------------+------------+----------------+----------+-----+ +| ts | g | greptime_value | instance | job | ++---------------------+------------+----------------+----------+-----+ +| 1970-01-01T00:50:00 | canary | 300.0 | 0 | api | +| 1970-01-01T00:50:00 | canary | 400.0 | 1 | api | +| 1970-01-01T00:50:00 | canary | 700.0 | 0 | app | +| 1970-01-01T00:50:00 | canary | 800.0 | 1 | app | +| 1970-01-01T00:50:00 | production | 100.0 | 0 | api | +| 1970-01-01T00:50:00 | production | 200.0 | 1 | api | +| 1970-01-01T00:50:00 | production | 500.0 | 0 | app | +| 1970-01-01T00:50:00 | production | 600.0 | 1 | app | ++---------------------+------------+----------------+----------+-----+ -- # On overlap the rhs samples must be dropped. 
-- eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"} @@ -157,7 +157,7 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} or http_requests{g="produc -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or http_requests{instance="1"}; -Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named http_requests.val. Valid fields are http_requests.job, http_requests.instance, http_requests.g, http_requests.ts, "val + Float64(1)". +Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named http_requests.greptime_value. Valid fields are http_requests.job, http_requests.instance, http_requests.g, http_requests.ts, "greptime_value + Float64(1)". -- # Matching only on instance excludes everything that has instance=0/1 but includes -- # entries without the instance label. @@ -172,7 +172,7 @@ Error: 1004(InvalidArguments), Internal error during building DataFusion plan: N -- NOT SUPPORTED: `or` tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a); -Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named cpu_count.val. Valid fields are cpu_count.ts. +Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named cpu_count.greptime_value. Valid fields are cpu_count.ts. -- eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a) -- {group="canary", instance="0", job="api-server"} 301 @@ -185,7 +185,7 @@ Error: 1004(InvalidArguments), Internal error during building DataFusion plan: N -- NOT SUPPORTED: `or` tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or ignoring(l, g, job) (http_requests or cpu_count or vector_matching_a); -Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named cpu_count.val. Valid fields are cpu_count.ts. +Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named cpu_count.greptime_value. Valid fields are cpu_count.ts. 
-- eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"} -- http_requests{group="canary", instance="1", job="api-server"} 400 @@ -193,12 +193,12 @@ Error: 1004(InvalidArguments), Internal error during building DataFusion plan: N -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') http_requests{g="canary"} unless http_requests{instance="0"}; -+---------------------+-----+----------+--------+-------+ -| ts | job | instance | g | val | -+---------------------+-----+----------+--------+-------+ -| 1970-01-01T00:50:00 | api | 1 | canary | 400.0 | -| 1970-01-01T00:50:00 | app | 1 | canary | 800.0 | -+---------------------+-----+----------+--------+-------+ ++---------------------+-----+----------+--------+----------------+ +| ts | job | instance | g | greptime_value | ++---------------------+-----+----------+--------+----------------+ +| 1970-01-01T00:50:00 | api | 1 | canary | 400.0 | +| 1970-01-01T00:50:00 | app | 1 | canary | 800.0 | ++---------------------+-----+----------+--------+----------------+ -- eval instant at 50m http_requests{group="canary"} unless on(job) http_requests{instance="0"} tql eval (3000, 3000, '1s') http_requests{g="canary"} unless on(job) http_requests{instance="0"}; @@ -212,12 +212,12 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} unless on(job) http_reques -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') http_requests{g="canary"} unless on(job, instance) http_requests{instance="0"}; -+---------------------+-----+----------+--------+-------+ -| ts | job | instance | g | val | -+---------------------+-----+----------+--------+-------+ -| 1970-01-01T00:50:00 | api | 1 | canary | 400.0 | -| 1970-01-01T00:50:00 | app | 1 | canary | 800.0 | -+---------------------+-----+----------+--------+-------+ ++---------------------+-----+----------+--------+----------------+ +| ts | job | instance | g | greptime_value | ++---------------------+-----+----------+--------+----------------+ +| 1970-01-01T00:50:00 | api | 1 | canary | 400.0 | +| 1970-01-01T00:50:00 | app | 1 | canary | 800.0 | ++---------------------+-----+----------+--------+----------------+ -- eval instant at 50m http_requests{group="canary"} unless ignoring(group, instance) http_requests{instance="0"} tql eval (3000, 3000, '1s') http_requests{g="canary"} unless ignoring(g, instance) http_requests{instance="0"}; @@ -231,12 +231,12 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} unless ignoring(g, instanc -- SQLNESS SORT_RESULT 3 1 tql eval (3000, 3000, '1s') http_requests{g="canary"} unless ignoring(g) http_requests{instance="0"}; -+---------------------+-----+----------+--------+-------+ -| ts | job | instance | g | val | -+---------------------+-----+----------+--------+-------+ -| 1970-01-01T00:50:00 | api | 1 | canary | 400.0 | -| 1970-01-01T00:50:00 | app | 1 | canary | 800.0 | -+---------------------+-----+----------+--------+-------+ ++---------------------+-----+----------+--------+----------------+ +| ts | job | instance | g | greptime_value | ++---------------------+-----+----------+--------+----------------+ +| 1970-01-01T00:50:00 | api | 1 | canary | 400.0 | +| 1970-01-01T00:50:00 | app | 1 | canary | 800.0 | ++---------------------+-----+----------+--------+----------------+ -- # https://github.com/prometheus/prometheus/issues/1489 -- eval instant at 50m http_requests AND ON (dummy) vector(1) @@ -251,7 +251,7 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} unless ignoring(g) http_re -- NOT SUPPORTED: `vector()` tql eval (3000, 3000, '1s') 
http_requests AND ON (dummy) vector(1); -Error: 1004(InvalidArguments), Unsupported expr type: vector +Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named time. Valid fields are http_requests.ts, http_requests.job, http_requests.instance, http_requests.g, http_requests.greptime_value. -- eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1) -- http_requests{group="canary", instance="0", job="api-server"} 300 @@ -265,7 +265,7 @@ Error: 1004(InvalidArguments), Unsupported expr type: vector -- NOT SUPPORTED: `vector()` tql eval (3000, 3000, '1s') http_requests AND IGNORING (g, instance, job) vector(1); -Error: 1004(InvalidArguments), Unsupported expr type: vector +Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named time. Valid fields are http_requests.ts, http_requests.job, http_requests.instance, http_requests.g, http_requests.greptime_value. drop table http_requests; @@ -280,7 +280,7 @@ drop table vector_matching_a; Affected Rows: 0 -- the following cases are not from Prometheus. -create table t1 (ts timestamp time index, job string primary key, val double); +create table t1 (ts timestamp time index, job string primary key, greptime_value double); Affected Rows: 0 @@ -288,7 +288,7 @@ insert into t1 values (0, "a", 1.0), (500000, "b", 2.0), (1000000, "a", 3.0), (1 Affected Rows: 4 -create table t2 (ts timestamp time index, val double); +create table t2 (ts timestamp time index, greptime_value double); Affected Rows: 0 @@ -299,102 +299,102 @@ Affected Rows: 7 -- SQLNESS SORT_RESULT 3 1 tql eval (0, 2000, '400') t1 or t2; -+-----+---------------------+-----+ -| job | ts | val | -+-----+---------------------+-----+ -| | 1970-01-01T00:00:00 | 0.0 | -| | 1970-01-01T00:06:40 | 0.0 | -| | 1970-01-01T00:13:20 | 0.0 | -| | 1970-01-01T00:20:00 | 0.0 | -| | 1970-01-01T00:26:40 | 0.0 | -| | 1970-01-01T00:33:20 | 0.0 | -| a | 1970-01-01T00:00:00 | 1.0 | -| a | 1970-01-01T00:20:00 | 3.0 | -| b | 1970-01-01T00:13:20 | 2.0 | -| c | 1970-01-01T00:26:40 | 4.0 | -+-----+---------------------+-----+ ++---------------------+----------------+-----+ +| ts | greptime_value | job | ++---------------------+----------------+-----+ +| 1970-01-01T00:00:00 | 0.0 | | +| 1970-01-01T00:00:00 | 1.0 | a | +| 1970-01-01T00:06:40 | 0.0 | | +| 1970-01-01T00:13:20 | 0.0 | | +| 1970-01-01T00:13:20 | 2.0 | b | +| 1970-01-01T00:20:00 | 0.0 | | +| 1970-01-01T00:20:00 | 3.0 | a | +| 1970-01-01T00:26:40 | 0.0 | | +| 1970-01-01T00:26:40 | 4.0 | c | +| 1970-01-01T00:33:20 | 0.0 | | ++---------------------+----------------+-----+ -- SQLNESS SORT_RESULT 3 1 tql eval (0, 2000, '400') t1 or on () t2; -+-----+---------------------+-----+ -| job | ts | val | -+-----+---------------------+-----+ -| | 1970-01-01T00:06:40 | 0.0 | -| | 1970-01-01T00:33:20 | 0.0 | -| a | 1970-01-01T00:00:00 | 1.0 | -| a | 1970-01-01T00:20:00 | 3.0 | -| b | 1970-01-01T00:13:20 | 2.0 | -| c | 1970-01-01T00:26:40 | 4.0 | -+-----+---------------------+-----+ ++---------------------+----------------+-----+ +| ts | greptime_value | job | ++---------------------+----------------+-----+ +| 1970-01-01T00:00:00 | 1.0 | a | +| 1970-01-01T00:06:40 | 0.0 | | +| 1970-01-01T00:13:20 | 2.0 | b | +| 1970-01-01T00:20:00 | 3.0 | a | +| 1970-01-01T00:26:40 | 4.0 | c | +| 1970-01-01T00:33:20 | 0.0 | | ++---------------------+----------------+-----+ -- SQLNESS SORT_RESULT 3 1 tql eval (0, 2000, '400') t1 or on (job) t2; -+-----+---------------------+-----+ -| job | ts | val | 
-+-----+---------------------+-----+ -| | 1970-01-01T00:00:00 | 0.0 | -| | 1970-01-01T00:06:40 | 0.0 | -| | 1970-01-01T00:13:20 | 0.0 | -| | 1970-01-01T00:20:00 | 0.0 | -| | 1970-01-01T00:26:40 | 0.0 | -| | 1970-01-01T00:33:20 | 0.0 | -| a | 1970-01-01T00:00:00 | 1.0 | -| a | 1970-01-01T00:20:00 | 3.0 | -| b | 1970-01-01T00:13:20 | 2.0 | -| c | 1970-01-01T00:26:40 | 4.0 | -+-----+---------------------+-----+ ++---------------------+----------------+-----+ +| ts | greptime_value | job | ++---------------------+----------------+-----+ +| 1970-01-01T00:00:00 | 0.0 | | +| 1970-01-01T00:00:00 | 1.0 | a | +| 1970-01-01T00:06:40 | 0.0 | | +| 1970-01-01T00:13:20 | 0.0 | | +| 1970-01-01T00:13:20 | 2.0 | b | +| 1970-01-01T00:20:00 | 0.0 | | +| 1970-01-01T00:20:00 | 3.0 | a | +| 1970-01-01T00:26:40 | 0.0 | | +| 1970-01-01T00:26:40 | 4.0 | c | +| 1970-01-01T00:33:20 | 0.0 | | ++---------------------+----------------+-----+ -- SQLNESS SORT_RESULT 3 1 tql eval (0, 2000, '400') t2 or t1; -+-----+---------------------+-----+ -| job | ts | val | -+-----+---------------------+-----+ -| | 1970-01-01T00:00:00 | 0.0 | -| | 1970-01-01T00:06:40 | 0.0 | -| | 1970-01-01T00:13:20 | 0.0 | -| | 1970-01-01T00:20:00 | 0.0 | -| | 1970-01-01T00:26:40 | 0.0 | -| | 1970-01-01T00:33:20 | 0.0 | -| a | 1970-01-01T00:00:00 | 1.0 | -| a | 1970-01-01T00:20:00 | 3.0 | -| b | 1970-01-01T00:13:20 | 2.0 | -| c | 1970-01-01T00:26:40 | 4.0 | -+-----+---------------------+-----+ ++---------------------+----------------+-----+ +| ts | greptime_value | job | ++---------------------+----------------+-----+ +| 1970-01-01T00:00:00 | 0.0 | | +| 1970-01-01T00:00:00 | 1.0 | a | +| 1970-01-01T00:06:40 | 0.0 | | +| 1970-01-01T00:13:20 | 0.0 | | +| 1970-01-01T00:13:20 | 2.0 | b | +| 1970-01-01T00:20:00 | 0.0 | | +| 1970-01-01T00:20:00 | 3.0 | a | +| 1970-01-01T00:26:40 | 0.0 | | +| 1970-01-01T00:26:40 | 4.0 | c | +| 1970-01-01T00:33:20 | 0.0 | | ++---------------------+----------------+-----+ -- SQLNESS SORT_RESULT 3 1 tql eval (0, 2000, '400') t2 or on () t1; -+-----+---------------------+-----+ -| job | ts | val | -+-----+---------------------+-----+ -| | 1970-01-01T00:00:00 | 0.0 | -| | 1970-01-01T00:06:40 | 0.0 | -| | 1970-01-01T00:13:20 | 0.0 | -| | 1970-01-01T00:20:00 | 0.0 | -| | 1970-01-01T00:26:40 | 0.0 | -| | 1970-01-01T00:33:20 | 0.0 | -+-----+---------------------+-----+ ++---------------------+----------------+-----+ +| ts | greptime_value | job | ++---------------------+----------------+-----+ +| 1970-01-01T00:00:00 | 0.0 | | +| 1970-01-01T00:06:40 | 0.0 | | +| 1970-01-01T00:13:20 | 0.0 | | +| 1970-01-01T00:20:00 | 0.0 | | +| 1970-01-01T00:26:40 | 0.0 | | +| 1970-01-01T00:33:20 | 0.0 | | ++---------------------+----------------+-----+ -- SQLNESS SORT_RESULT 3 1 tql eval (0, 2000, '400') t2 or on(job) t1; -+-----+---------------------+-----+ -| job | ts | val | -+-----+---------------------+-----+ -| | 1970-01-01T00:00:00 | 0.0 | -| | 1970-01-01T00:06:40 | 0.0 | -| | 1970-01-01T00:13:20 | 0.0 | -| | 1970-01-01T00:20:00 | 0.0 | -| | 1970-01-01T00:26:40 | 0.0 | -| | 1970-01-01T00:33:20 | 0.0 | -| a | 1970-01-01T00:00:00 | 1.0 | -| a | 1970-01-01T00:20:00 | 3.0 | -| b | 1970-01-01T00:13:20 | 2.0 | -| c | 1970-01-01T00:26:40 | 4.0 | -+-----+---------------------+-----+ ++---------------------+----------------+-----+ +| ts | greptime_value | job | ++---------------------+----------------+-----+ +| 1970-01-01T00:00:00 | 0.0 | | +| 1970-01-01T00:00:00 | 1.0 | a | +| 1970-01-01T00:06:40 | 0.0 | | +| 1970-01-01T00:13:20 | 0.0 | | +| 
1970-01-01T00:13:20 | 2.0 | b | +| 1970-01-01T00:20:00 | 0.0 | | +| 1970-01-01T00:20:00 | 3.0 | a | +| 1970-01-01T00:26:40 | 0.0 | | +| 1970-01-01T00:26:40 | 4.0 | c | +| 1970-01-01T00:33:20 | 0.0 | | ++---------------------+----------------+-----+ drop table t1; diff --git a/tests/cases/standalone/common/promql/set_operation.sql b/tests/cases/standalone/common/promql/set_operation.sql index 6a71711bd896..521791a02c4f 100644 --- a/tests/cases/standalone/common/promql/set_operation.sql +++ b/tests/cases/standalone/common/promql/set_operation.sql @@ -7,7 +7,7 @@ create table http_requests ( job string, instance string, g string, -- for `group` - val double, + greptime_value double, primary key (job, instance, g) ); @@ -27,7 +27,7 @@ create table cpu_count(ts timestamp time index); create table vector_matching_a( ts timestamp time index, l string primary key, - val double, + greptime_value double, ); insert into vector_matching_a values @@ -176,11 +176,11 @@ drop table vector_matching_a; -- the following cases are not from Prometheus. -create table t1 (ts timestamp time index, job string primary key, val double); +create table t1 (ts timestamp time index, job string primary key, greptime_value double); insert into t1 values (0, "a", 1.0), (500000, "b", 2.0), (1000000, "a", 3.0), (1500000, "c", 4.0); -create table t2 (ts timestamp time index, val double); +create table t2 (ts timestamp time index, greptime_value double); insert into t2 values (0, 0), (300000, 0), (600000, 0), (900000, 0), (1200000, 0), (1500000, 0), (1800000, 0);
feat
support PromQL function `vector` (#3036)
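The planner change above treats histogram_quantile and vector as "special" functions that are lowered into dedicated plan nodes (HistogramFold, EmptyMetric) rather than ordinary scalar calls. The sketch below shows only that dispatch shape with stand-in plan and error types and hypothetical names; it is not the actual PromQL/DataFusion planner API.

// Stand-ins for the real logical plan and error types.
#[derive(Debug)]
enum LogicalPlan {
    EmptyMetric { value: f64 },
    HistogramFold { phi: f64 },
    GenericCall { name: String, args: Vec<f64> },
}

#[derive(Debug)]
enum PlanError {
    InvalidArgument(&'static str),
}

const SPECIAL_HISTOGRAM_QUANTILE: &str = "histogram_quantile";
const SPECIAL_VECTOR_FUNCTION: &str = "vector";

// Special functions become whole plan nodes; everything else goes through the
// generic function-call path.
fn plan_call(name: &str, args: &[f64]) -> Result<LogicalPlan, PlanError> {
    match name {
        SPECIAL_HISTOGRAM_QUANTILE => {
            if args.len() != 2 {
                return Err(PlanError::InvalidArgument("histogram_quantile takes 2 arguments"));
            }
            Ok(LogicalPlan::HistogramFold { phi: args[0] })
        }
        SPECIAL_VECTOR_FUNCTION => {
            if args.len() != 1 {
                return Err(PlanError::InvalidArgument("vector takes 1 argument"));
            }
            // vector(x) is planned as an empty metric that emits the literal at
            // every step of the evaluation range.
            Ok(LogicalPlan::EmptyMetric { value: args[0] })
        }
        other => Ok(LogicalPlan::GenericCall {
            name: other.to_string(),
            args: args.to_vec(),
        }),
    }
}

fn main() {
    println!("{:?}", plan_call("vector", &[1.0]).unwrap());
    println!("{:?}", plan_call("histogram_quantile", &[0.9, 0.0]).unwrap());
    println!("{:?}", plan_call("rate", &[0.0]).unwrap());
}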
d094f488224ba26b077f7184785f8642bbc3f353
2025-02-11 11:49:58
ZonaHe
feat: update dashboard to v0.7.9 (#5508)
false
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION index 07c71e560c31..a502cc623bae 100644 --- a/src/servers/dashboard/VERSION +++ b/src/servers/dashboard/VERSION @@ -1 +1 @@ -v0.7.9-rc +v0.7.9
feat
update dashboard to v0.7.9 (#5508)
cefdffff099435bc5d8fc442b90efbaa3d3f4f33
2022-11-10 09:05:16
Yingwen
fix: CURRENT_TIMESTAMP supports int64 type (#436)
false
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs index c2c4adacda11..b8ec95b9be78 100644 --- a/src/datanode/src/error.rs +++ b/src/datanode/src/error.rs @@ -92,6 +92,7 @@ pub enum Error { #[snafu(display("Failed to insert value to table: {}, source: {}", table_name, source))] Insert { table_name: String, + #[snafu(backtrace)] source: TableError, }, diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs index 4bbaf9d14d73..c542c4cd35ef 100644 --- a/src/datanode/src/tests/instance_test.rs +++ b/src/datanode/src/tests/instance_test.rs @@ -1,6 +1,8 @@ use arrow::array::{Int64Array, UInt64Array}; use common_query::Output; use common_recordbatch::util; +use datafusion::arrow_print; +use datafusion_common::record_batch::RecordBatch as DfRecordBatch; use datatypes::arrow_array::StringArray; use datatypes::prelude::ConcreteDataType; @@ -240,12 +242,24 @@ pub async fn test_create_table_illegal_timestamp_type() { } } +async fn check_output_stream(output: Output, expected: Vec<&str>) { + match output { + Output::Stream(stream) => { + let recordbatches = util::collect(stream).await.unwrap(); + let recordbatch = recordbatches + .into_iter() + .map(|r| r.df_recordbatch) + .collect::<Vec<DfRecordBatch>>(); + let pretty_print = arrow_print::write(&recordbatch); + let pretty_print = pretty_print.lines().collect::<Vec<&str>>(); + assert_eq!(pretty_print, expected); + } + _ => unreachable!(), + } +} + #[tokio::test] async fn test_alter_table() { - use datafusion::arrow_print; - use datafusion_common::record_batch::RecordBatch as DfRecordBatch; - // TODO(LFC) Use real Mito engine when we can alter its region schema, - // and delete the `new_mock` method. let instance = Instance::new_mock().await.unwrap(); instance.start().await.unwrap(); @@ -278,26 +292,69 @@ async fn test_alter_table() { assert!(matches!(output, Output::AffectedRows(1))); let output = instance.execute_sql("select * from demo").await.unwrap(); - match output { - Output::Stream(stream) => { - let recordbatches = util::collect(stream).await.unwrap(); - let recordbatch = recordbatches - .into_iter() - .map(|r| r.df_recordbatch) - .collect::<Vec<DfRecordBatch>>(); - let pretty_print = arrow_print::write(&recordbatch); - let pretty_print = pretty_print.lines().collect::<Vec<&str>>(); - let expected = vec![ - "+-------+-----+--------+---------------------+--------+", - "| host | cpu | memory | ts | my_tag |", - "+-------+-----+--------+---------------------+--------+", - "| host1 | 1.1 | 100 | 1970-01-01 00:00:01 | |", - "| host2 | 2.2 | 200 | 1970-01-01 00:00:02 | hello |", - "| host3 | 3.3 | 300 | 1970-01-01 00:00:03 | |", - "+-------+-----+--------+---------------------+--------+", - ]; - assert_eq!(pretty_print, expected); - } - _ => unreachable!(), - } + let expected = vec![ + "+-------+-----+--------+---------------------+--------+", + "| host | cpu | memory | ts | my_tag |", + "+-------+-----+--------+---------------------+--------+", + "| host1 | 1.1 | 100 | 1970-01-01 00:00:01 | |", + "| host2 | 2.2 | 200 | 1970-01-01 00:00:02 | hello |", + "| host3 | 3.3 | 300 | 1970-01-01 00:00:03 | |", + "+-------+-----+--------+---------------------+--------+", + ]; + check_output_stream(output, expected).await; +} + +async fn test_insert_with_default_value_for_type(type_name: &str) { + let (opts, _guard) = test_util::create_tmp_dir_and_datanode_opts("execute_create"); + let instance = Instance::with_mock_meta_client(&opts).await.unwrap(); + instance.start().await.unwrap(); + + let create_sql = 
format!( + r#"create table test_table( + host string, + ts {} DEFAULT CURRENT_TIMESTAMP, + cpu double default 0, + TIME INDEX (ts), + PRIMARY KEY(host) + ) engine=mito with(regions=1);"#, + type_name + ); + let output = instance.execute_sql(&create_sql).await.unwrap(); + assert!(matches!(output, Output::AffectedRows(1))); + + // Insert with ts. + instance + .execute_sql("insert into test_table(host, cpu, ts) values ('host1', 1.1, 1000)") + .await + .unwrap(); + assert!(matches!(output, Output::AffectedRows(1))); + + // Insert without ts, so it should be filled by default value. + let output = instance + .execute_sql("insert into test_table(host, cpu) values ('host2', 2.2)") + .await + .unwrap(); + assert!(matches!(output, Output::AffectedRows(1))); + + let output = instance + .execute_sql("select host, cpu from test_table") + .await + .unwrap(); + let expected = vec![ + "+-------+-----+", + "| host | cpu |", + "+-------+-----+", + "| host1 | 1.1 |", + "| host2 | 2.2 |", + "+-------+-----+", + ]; + check_output_stream(output, expected).await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_insert_with_default_value() { + common_telemetry::init_default_ut_logging(); + + test_insert_with_default_value_for_type("timestamp").await; + test_insert_with_default_value_for_type("bigint").await; } diff --git a/src/datatypes/src/schema/constraint.rs b/src/datatypes/src/schema/constraint.rs index 8f54312d121e..50d588033934 100644 --- a/src/datatypes/src/schema/constraint.rs +++ b/src/datatypes/src/schema/constraint.rs @@ -1,14 +1,13 @@ use std::sync::Arc; -use common_time::{util, Timestamp}; +use common_time::util; use serde::{Deserialize, Serialize}; use snafu::{ensure, ResultExt}; use crate::data_type::{ConcreteDataType, DataType}; use crate::error::{self, Result}; -use crate::scalars::ScalarVector; use crate::value::Value; -use crate::vectors::{ConstantVector, TimestampVector, VectorRef}; +use crate::vectors::{Int64Vector, TimestampVector, VectorRef}; const CURRENT_TIMESTAMP: &str = "current_timestamp()"; @@ -107,15 +106,7 @@ impl ColumnDefaultConstraint { match &expr[..] { // TODO(dennis): we only supports current_timestamp right now, // it's better to use a expression framework in future. - CURRENT_TIMESTAMP => { - // TODO(yingwen): We should coerce the type to the physical type of - // input `data_type`. 
- let vector = - Arc::new(TimestampVector::from_slice(&[Timestamp::from_millis( - util::current_time_millis(), - )])); - Ok(Arc::new(ConstantVector::new(vector, num_rows))) - } + CURRENT_TIMESTAMP => create_current_timestamp_vector(data_type, num_rows), _ => error::UnsupportedDefaultExprSnafu { expr }.fail(), } } @@ -143,9 +134,31 @@ impl ColumnDefaultConstraint { } } +fn create_current_timestamp_vector( + data_type: &ConcreteDataType, + num_rows: usize, +) -> Result<VectorRef> { + match data_type { + ConcreteDataType::Timestamp(_) => Ok(Arc::new(TimestampVector::from_values( + std::iter::repeat(util::current_time_millis()).take(num_rows), + ))), + ConcreteDataType::Int64(_) => Ok(Arc::new(Int64Vector::from_values( + std::iter::repeat(util::current_time_millis()).take(num_rows), + ))), + _ => error::DefaultValueTypeSnafu { + reason: format!( + "Not support to assign current timestamp to {:?} type", + data_type + ), + } + .fail(), + } +} + #[cfg(test)] mod tests { use super::*; + use crate::error::Error; use crate::vectors::Int32Vector; #[test] @@ -224,6 +237,7 @@ mod tests { #[test] fn test_create_default_vector_by_func() { let constraint = ColumnDefaultConstraint::Function(CURRENT_TIMESTAMP.to_string()); + // Timestamp type. let data_type = ConcreteDataType::timestamp_millis_datatype(); let v = constraint .create_default_vector(&data_type, false, 4) @@ -235,10 +249,32 @@ mod tests { v.get(0) ); + // Int64 type. + let data_type = ConcreteDataType::int64_datatype(); + let v = constraint + .create_default_vector(&data_type, false, 4) + .unwrap(); + assert_eq!(4, v.len()); + assert!( + matches!(v.get(0), Value::Int64(_)), + "v {:?} is not timestamp", + v.get(0) + ); + let constraint = ColumnDefaultConstraint::Function("no".to_string()); let data_type = ConcreteDataType::timestamp_millis_datatype(); constraint .create_default_vector(&data_type, false, 4) .unwrap_err(); } + + #[test] + fn test_create_by_func_and_invalid_type() { + let constraint = ColumnDefaultConstraint::Function(CURRENT_TIMESTAMP.to_string()); + let data_type = ConcreteDataType::boolean_datatype(); + let err = constraint + .create_default_vector(&data_type, false, 4) + .unwrap_err(); + assert!(matches!(err, Error::DefaultValueType { .. }), "{:?}", err); + } }
fix
CURRENT_TIMESTAMP supports int64 type (#436)
d9175213fdf46c475d8a9f3f0093027e0c0e8eb6
2024-07-04 13:08:00
discord9
chore: add missing s for `--metasrv-addr` (#4278)
false
diff --git a/docker/docker-compose/cluster-with-etcd.yaml b/docker/docker-compose/cluster-with-etcd.yaml
index 9ef97e135f77..833d16883e40 100644
--- a/docker/docker-compose/cluster-with-etcd.yaml
+++ b/docker/docker-compose/cluster-with-etcd.yaml
@@ -66,7 +66,7 @@ services:
       - --node-id=0
       - --rpc-addr=0.0.0.0:3001
       - --rpc-hostname=datanode0:3001
-      - --metasrv-addr=metasrv:3002
+      - --metasrv-addrs=metasrv:3002
     volumes:
       - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
     depends_on:
chore
add missing s for `--metasrv-addr` (#4278)
4f0984c1d7ac3350d568a7a8d6bd04247e27a5a9
2024-07-05 13:33:46
Weny Xu
chore: remove original region failover implementation (#4237)
false
diff --git a/src/meta-srv/src/lock.rs b/src/meta-srv/src/lock.rs index fb607b192bfe..5eceddac0485 100644 --- a/src/meta-srv/src/lock.rs +++ b/src/meta-srv/src/lock.rs @@ -13,7 +13,6 @@ // limitations under the License. pub mod etcd; -pub(crate) mod keys; pub(crate) mod memory; use std::sync::Arc; diff --git a/src/meta-srv/src/lock/keys.rs b/src/meta-srv/src/lock/keys.rs deleted file mode 100644 index db3f5d81282a..000000000000 --- a/src/meta-srv/src/lock/keys.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! All keys used for distributed locking in the Metasrv. -//! Place them in this unified module for better maintenance. - -use common_meta::RegionIdent; - -use crate::lock::Key; - -pub(crate) fn table_metadata_lock_key(region: &RegionIdent) -> Key { - format!( - "table_metadata_lock_({}-{})", - region.cluster_id, region.table_id, - ) - .into_bytes() -} diff --git a/src/meta-srv/src/procedure.rs b/src/meta-srv/src/procedure.rs index dbe63b762c9f..1f430654d224 100644 --- a/src/meta-srv/src/procedure.rs +++ b/src/meta-srv/src/procedure.rs @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// TODO(weny): remove it. -#[allow(unused)] -pub mod region_failover; pub mod region_migration; #[cfg(test)] mod tests; diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs deleted file mode 100644 index 8e0e1424a6b1..000000000000 --- a/src/meta-srv/src/procedure/region_failover.rs +++ /dev/null @@ -1,844 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -mod activate_region; -mod deactivate_region; -mod failover_end; -mod failover_start; -mod invalidate_cache; -mod update_metadata; - -use std::collections::HashSet; -use std::fmt::Debug; -use std::sync::{Arc, RwLock}; -use std::time::Duration; - -use async_trait::async_trait; -use common_meta::key::datanode_table::DatanodeTableKey; -use common_meta::key::{TableMetadataManagerRef, MAINTENANCE_KEY}; -use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef}; -use common_meta::lock_key::{CatalogLock, RegionLock, SchemaLock, TableLock}; -use common_meta::{ClusterId, RegionIdent}; -use common_procedure::error::{ - Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu, -}; -use common_procedure::{ - watcher, Context as ProcedureContext, LockKey, Procedure, ProcedureManagerRef, ProcedureWithId, - Status, -}; -use common_telemetry::{error, info, warn}; -use failover_start::RegionFailoverStart; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; -use store_api::storage::{RegionId, RegionNumber}; -use table::metadata::TableId; -use table::table_name::TableName; - -use crate::error::{ - self, KvBackendSnafu, RegisterProcedureLoaderSnafu, Result, TableMetadataManagerSnafu, -}; -use crate::lock::DistLockRef; -use crate::metasrv::{SelectorContext, SelectorRef}; -use crate::service::mailbox::MailboxRef; - -const OPEN_REGION_MESSAGE_TIMEOUT: Duration = Duration::from_secs(30); - -/// A key for the preventing running multiple failover procedures for the same region. -#[derive(PartialEq, Eq, Hash, Clone)] -pub(crate) struct RegionFailoverKey { - pub(crate) cluster_id: ClusterId, - pub(crate) table_id: TableId, - pub(crate) region_number: RegionNumber, -} - -impl From<RegionIdent> for RegionFailoverKey { - fn from(region_ident: RegionIdent) -> Self { - Self { - cluster_id: region_ident.cluster_id, - table_id: region_ident.table_id, - region_number: region_ident.region_number, - } - } -} - -pub(crate) struct RegionFailoverManager { - region_lease_secs: u64, - in_memory: ResettableKvBackendRef, - kv_backend: KvBackendRef, - mailbox: MailboxRef, - procedure_manager: ProcedureManagerRef, - selector: SelectorRef, - selector_ctx: SelectorContext, - dist_lock: DistLockRef, - running_procedures: Arc<RwLock<HashSet<RegionFailoverKey>>>, - table_metadata_manager: TableMetadataManagerRef, -} - -struct FailoverProcedureGuard { - running_procedures: Arc<RwLock<HashSet<RegionFailoverKey>>>, - key: RegionFailoverKey, -} - -impl Drop for FailoverProcedureGuard { - fn drop(&mut self) { - let _ = self.running_procedures.write().unwrap().remove(&self.key); - } -} - -impl RegionFailoverManager { - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - region_lease_secs: u64, - in_memory: ResettableKvBackendRef, - kv_backend: KvBackendRef, - mailbox: MailboxRef, - procedure_manager: ProcedureManagerRef, - (selector, selector_ctx): (SelectorRef, SelectorContext), - dist_lock: DistLockRef, - table_metadata_manager: TableMetadataManagerRef, - ) -> Self { - Self { - region_lease_secs, - in_memory, - kv_backend, - mailbox, - procedure_manager, - selector, - selector_ctx, - dist_lock, - running_procedures: Arc::new(RwLock::new(HashSet::new())), - table_metadata_manager, - } - } - - pub(crate) fn create_context(&self) -> RegionFailoverContext { - RegionFailoverContext { - region_lease_secs: self.region_lease_secs, - in_memory: self.in_memory.clone(), - kv_backend: self.kv_backend.clone(), - mailbox: self.mailbox.clone(), - selector: self.selector.clone(), - selector_ctx: 
self.selector_ctx.clone(), - dist_lock: self.dist_lock.clone(), - table_metadata_manager: self.table_metadata_manager.clone(), - } - } - - pub(crate) fn try_start(&self) -> Result<()> { - let context = self.create_context(); - self.procedure_manager - .register_loader( - RegionFailoverProcedure::TYPE_NAME, - Box::new(move |json| { - let context = context.clone(); - RegionFailoverProcedure::from_json(json, context).map(|p| Box::new(p) as _) - }), - ) - .context(RegisterProcedureLoaderSnafu { - type_name: RegionFailoverProcedure::TYPE_NAME, - }) - } - - fn insert_running_procedures( - &self, - failed_region: &RegionIdent, - ) -> Option<FailoverProcedureGuard> { - let key = RegionFailoverKey::from(failed_region.clone()); - let mut procedures = self.running_procedures.write().unwrap(); - if procedures.insert(key.clone()) { - Some(FailoverProcedureGuard { - running_procedures: self.running_procedures.clone(), - key, - }) - } else { - None - } - } - - pub(crate) async fn is_maintenance_mode(&self) -> Result<bool> { - self.kv_backend - .exists(MAINTENANCE_KEY.as_bytes()) - .await - .context(KvBackendSnafu) - } - - pub(crate) async fn do_region_failover(&self, failed_region: &RegionIdent) -> Result<()> { - let Some(guard) = self.insert_running_procedures(failed_region) else { - warn!("Region failover procedure for region {failed_region} is already running!"); - return Ok(()); - }; - - let table_info = self - .table_metadata_manager - .table_info_manager() - .get(failed_region.table_id) - .await - .context(error::TableMetadataManagerSnafu)?; - - if table_info.is_none() { - // The table could be dropped before the failure detector knows it. Then the region - // failover is not needed. - // Or the table could be renamed. But we will have a new region ident to detect failure. - // So the region failover here is not needed either. - return Ok(()); - } - - if !self.failed_region_exists(failed_region).await? { - // The failed region could be failover by another procedure. - return Ok(()); - } - - let context = self.create_context(); - // Safety: Check before. - let table_info = table_info.unwrap(); - let TableName { - catalog_name, - schema_name, - .. 
- } = table_info.table_name(); - let procedure = - RegionFailoverProcedure::new(catalog_name, schema_name, failed_region.clone(), context); - let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); - let procedure_id = procedure_with_id.id; - info!("Starting region failover procedure {procedure_id} for region {failed_region:?}"); - - let procedure_manager = self.procedure_manager.clone(); - let failed_region = failed_region.clone(); - let _handle = common_runtime::spawn_bg(async move { - let _ = guard; - - let watcher = &mut match procedure_manager.submit(procedure_with_id).await { - Ok(watcher) => watcher, - Err(e) => { - error!(e; "Failed to submit region failover procedure {procedure_id} for region {failed_region}"); - return; - } - }; - - if let Err(e) = watcher::wait(watcher).await { - error!(e; "Failed to wait region failover procedure {procedure_id} for region {failed_region}"); - return; - } - - info!("Region failover procedure {procedure_id} for region {failed_region} is finished successfully!"); - }); - Ok(()) - } - - async fn failed_region_exists(&self, failed_region: &RegionIdent) -> Result<bool> { - let table_id = failed_region.table_id; - let datanode_id = failed_region.datanode_id; - - let value = self - .table_metadata_manager - .datanode_table_manager() - .get(&DatanodeTableKey::new(datanode_id, table_id)) - .await - .context(TableMetadataManagerSnafu)?; - - Ok(value - .map(|value| { - value - .regions - .iter() - .any(|region| *region == failed_region.region_number) - }) - .unwrap_or_default()) - } -} - -#[derive(Serialize, Deserialize, Debug)] -struct LockMeta { - catalog: String, - schema: String, -} - -/// A "Node" in the state machine of region failover procedure. -/// Contains the current state and the data. -#[derive(Serialize, Deserialize, Debug)] -struct Node { - lock_meta: LockMeta, - failed_region: RegionIdent, - state: Box<dyn State>, -} - -/// The "Context" of region failover procedure state machine. -#[derive(Clone)] -pub struct RegionFailoverContext { - pub region_lease_secs: u64, - pub in_memory: ResettableKvBackendRef, - pub kv_backend: KvBackendRef, - pub mailbox: MailboxRef, - pub selector: SelectorRef, - pub selector_ctx: SelectorContext, - pub dist_lock: DistLockRef, - pub table_metadata_manager: TableMetadataManagerRef, -} - -/// The state machine of region failover procedure. Driven by the call to `next`. 
-#[async_trait] -#[typetag::serde(tag = "region_failover_state")] -trait State: Sync + Send + Debug { - async fn next( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>>; - - fn status(&self) -> Status { - Status::executing(true) - } -} - -/// The states transition of region failover procedure: -/// -/// ```text -/// ┌───────────────────┐ -/// │RegionFailoverStart│ -/// └─────────┬─────────┘ -/// │ -/// │ Selects a candidate(Datanode) -/// ┌─────────┐ │ to place the failed region -/// │ │ │ -/// If replied with │ ┌───▼────▼───────┐ -/// "Close region │ │DeactivateRegion│ -/// failed" │ └───┬────┬───────┘ -/// │ │ │ -/// └─────────┘ │ Sends "Close Region" request -/// │ to the failed Datanode, and -/// | wait for the Region lease expiry -/// ┌─────────┐ │ seconds -/// │ │ │ -/// │ ┌──▼────▼──────┐ -/// Wait candidate │ │ActivateRegion◄───────────────────────┐ -/// response timeout │ └──┬────┬──────┘ │ -/// │ │ │ │ -/// └─────────┘ │ Sends "Open Region" request │ -/// │ to the candidate Datanode, │ -/// │ and wait for 30 seconds │ -/// │ │ -/// │ Check Datanode returns │ -/// │ │ -/// success ├──────────────────────────────┘ -/// │ failed -/// ┌─────────▼──────────┐ -/// │UpdateRegionMetadata│ -/// └─────────┬──────────┘ -/// │ -/// │ Updates the Region -/// │ placement metadata -/// │ -/// ┌───────▼───────┐ -/// │InvalidateCache│ -/// └───────┬───────┘ -/// │ -/// │ Broadcast Invalidate Table -/// │ Cache -/// │ -/// ┌────────▼────────┐ -/// │RegionFailoverEnd│ -/// └─────────────────┘ -/// ``` -pub struct RegionFailoverProcedure { - node: Node, - context: RegionFailoverContext, -} - -impl RegionFailoverProcedure { - const TYPE_NAME: &'static str = "metasrv-procedure::RegionFailover"; - - pub fn new( - catalog: String, - schema: String, - failed_region: RegionIdent, - context: RegionFailoverContext, - ) -> Self { - let state = RegionFailoverStart::new(); - let node = Node { - lock_meta: LockMeta { catalog, schema }, - failed_region, - state: Box::new(state), - }; - Self { node, context } - } - - fn from_json(json: &str, context: RegionFailoverContext) -> ProcedureResult<Self> { - let node: Node = serde_json::from_str(json).context(FromJsonSnafu)?; - Ok(Self { node, context }) - } -} - -#[async_trait] -impl Procedure for RegionFailoverProcedure { - fn type_name(&self) -> &str { - Self::TYPE_NAME - } - - async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> { - let state = &mut self.node.state; - *state = state - .next(&self.context, &self.node.failed_region) - .await - .map_err(|e| { - if e.is_retryable() { - ProcedureError::retry_later(e) - } else { - ProcedureError::external(e) - } - })?; - Ok(state.status()) - } - - fn dump(&self) -> ProcedureResult<String> { - serde_json::to_string(&self.node).context(ToJsonSnafu) - } - - fn lock_key(&self) -> LockKey { - let region_ident = &self.node.failed_region; - let lock_key = vec![ - CatalogLock::Read(&self.node.lock_meta.catalog).into(), - SchemaLock::read(&self.node.lock_meta.catalog, &self.node.lock_meta.catalog).into(), - TableLock::Read(region_ident.table_id).into(), - RegionLock::Write(RegionId::new( - region_ident.table_id, - region_ident.region_number, - )) - .into(), - ]; - - LockKey::new(lock_key) - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - use std::sync::Mutex; - - use api::v1::meta::mailbox_message::Payload; - use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader}; - use 
common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; - use common_meta::ddl::utils::region_storage_path; - use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply}; - use common_meta::key::TableMetadataManager; - use common_meta::kv_backend::memory::MemoryKvBackend; - use common_meta::peer::Peer; - use common_meta::sequence::SequenceBuilder; - use common_meta::DatanodeId; - use common_procedure::{BoxedProcedure, ProcedureId}; - use common_procedure_test::MockContextProvider; - use rand::prelude::SliceRandom; - use tokio::sync::mpsc::Receiver; - - use super::*; - use crate::cluster::MetaPeerClientBuilder; - use crate::handler::{HeartbeatMailbox, Pusher, Pushers}; - use crate::lock::memory::MemLock; - use crate::selector::{Namespace, Selector, SelectorOptions}; - use crate::service::mailbox::Channel; - use crate::test_util; - - struct RandomNodeSelector { - nodes: Vec<Peer>, - } - - #[async_trait] - impl Selector for RandomNodeSelector { - type Context = SelectorContext; - type Output = Vec<Peer>; - - async fn select( - &self, - _ns: Namespace, - _ctx: &Self::Context, - _opts: SelectorOptions, - ) -> Result<Self::Output> { - let mut rng = rand::thread_rng(); - let mut nodes = self.nodes.clone(); - nodes.shuffle(&mut rng); - Ok(nodes) - } - } - - pub struct TestingEnv { - pub context: RegionFailoverContext, - pub heartbeat_receivers: HashMap<DatanodeId, Receiver<tonic::Result<HeartbeatResponse>>>, - pub pushers: Pushers, - pub path: String, - } - - impl TestingEnv { - pub async fn failed_region(&self, region_number: u32) -> RegionIdent { - let region_distribution = self - .context - .table_metadata_manager - .table_route_manager() - .get_region_distribution(1) - .await - .unwrap() - .unwrap(); - let failed_datanode = region_distribution - .iter() - .find_map(|(&datanode_id, regions)| { - if regions.contains(&region_number) { - Some(datanode_id) - } else { - None - } - }) - .unwrap(); - RegionIdent { - cluster_id: 0, - region_number, - datanode_id: failed_datanode, - table_id: 1, - engine: "mito2".to_string(), - } - } - } - - pub struct TestingEnvBuilder { - selector: Option<SelectorRef>, - } - - impl TestingEnvBuilder { - pub fn new() -> Self { - Self { selector: None } - } - - fn with_selector(mut self, selector: SelectorRef) -> Self { - self.selector = Some(selector); - self - } - - pub async fn build(self) -> TestingEnv { - let in_memory = Arc::new(MemoryKvBackend::new()); - let kv_backend = Arc::new(MemoryKvBackend::new()); - let meta_peer_client = MetaPeerClientBuilder::default() - .election(None) - .in_memory(Arc::new(MemoryKvBackend::new())) - .build() - .map(Arc::new) - // Safety: all required fields set at initialization - .unwrap(); - - let table_id = 1; - let table = "my_table"; - let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone())); - test_util::prepare_table_region_and_info_value(&table_metadata_manager, table).await; - let region_distribution = table_metadata_manager - .table_route_manager() - .get_region_distribution(1) - .await - .unwrap() - .unwrap(); - - let pushers = Pushers::default(); - let mut heartbeat_receivers = HashMap::with_capacity(3); - for datanode_id in 1..=3 { - let (tx, rx) = tokio::sync::mpsc::channel(1); - - let pusher_id = Channel::Datanode(datanode_id).pusher_id(); - let pusher = Pusher::new(tx, &RequestHeader::default()); - let _ = pushers.insert(pusher_id, pusher).await; - - let _ = heartbeat_receivers.insert(datanode_id, rx); - } - - let mailbox_sequence = - 
SequenceBuilder::new("test_heartbeat_mailbox", kv_backend.clone()) - .initial(0) - .step(100) - .build(); - let mailbox = HeartbeatMailbox::create(pushers.clone(), mailbox_sequence); - - let selector = self.selector.unwrap_or_else(|| { - let nodes = (1..=region_distribution.len()) - .map(|id| Peer { - id: id as u64, - addr: String::default(), - }) - .collect(); - Arc::new(RandomNodeSelector { nodes }) - }); - let selector_ctx = SelectorContext { - datanode_lease_secs: 10, - flownode_lease_secs: 10, - server_addr: "127.0.0.1:3002".to_string(), - kv_backend: kv_backend.clone(), - meta_peer_client, - table_id: Some(table_id), - }; - - TestingEnv { - context: RegionFailoverContext { - region_lease_secs: 10, - in_memory, - kv_backend, - mailbox, - selector, - selector_ctx, - dist_lock: Arc::new(MemLock::default()), - table_metadata_manager, - }, - pushers, - heartbeat_receivers, - path: region_storage_path(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME).to_string(), - } - } - } - - #[tokio::test] - async fn test_region_failover_procedure() { - let mut env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let mut procedure = Box::new(RegionFailoverProcedure::new( - "greptime".into(), - "public".into(), - failed_region.clone(), - env.context.clone(), - )) as BoxedProcedure; - - let mut failed_datanode = env - .heartbeat_receivers - .remove(&failed_region.datanode_id) - .unwrap(); - let mailbox_clone = env.context.mailbox.clone(); - let failed_region_clone = failed_region.clone(); - let _handle = common_runtime::spawn_bg(async move { - let resp = failed_datanode.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::CloseRegion(failed_region_clone.clone())) - .unwrap(), - )) - ); - - // simulating response from Datanode - mailbox_clone - .on_recv( - 1, - Ok(MailboxMessage { - id: 1, - subject: "Deactivate Region".to_string(), - from: format!("Datanode-{}", failed_region.datanode_id), - to: "Metasrv".to_string(), - timestamp_millis: common_time::util::current_time_millis(), - payload: Some(Payload::Json( - serde_json::to_string(&InstructionReply::CloseRegion(SimpleReply { - result: true, - error: None, - })) - .unwrap(), - )), - }), - ) - .await - .unwrap(); - }); - - let (candidate_tx, mut candidate_rx) = tokio::sync::mpsc::channel(1); - for (datanode_id, mut recv) in env.heartbeat_receivers.into_iter() { - let mailbox_clone = env.context.mailbox.clone(); - let opening_region = RegionIdent { - datanode_id, - ..failed_region.clone() - }; - let path = env.path.to_string(); - let candidate_tx = candidate_tx.clone(); - let _handle = common_runtime::spawn_bg(async move { - let resp = recv.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::OpenRegion(OpenRegion::new( - opening_region, - &path, - HashMap::new(), - HashMap::new(), - false - ))) - .unwrap(), - )) - ); - - candidate_tx.send(datanode_id).await.unwrap(); - - // simulating response from Datanode - mailbox_clone - .on_recv( - // Very tricky here: - // the procedure only sends two messages in sequence, the second one is - // "Activate Region", and its message id is 2. 
- 2, - Ok(MailboxMessage { - id: 2, - subject: "Activate Region".to_string(), - from: format!("Datanode-{datanode_id}"), - to: "Metasrv".to_string(), - timestamp_millis: common_time::util::current_time_millis(), - payload: Some(Payload::Json( - serde_json::to_string(&InstructionReply::OpenRegion(SimpleReply { - result: true, - error: None, - })) - .unwrap(), - )), - }), - ) - .await - .unwrap(); - }); - } - - common_procedure_test::execute_procedure_until_done(&mut procedure).await; - - assert_eq!( - procedure.dump().unwrap(), - r#"{"lock_meta":{"catalog":"greptime","schema":"public"},"failed_region":{"cluster_id":0,"datanode_id":1,"table_id":1,"region_number":1,"engine":"mito2"},"state":{"region_failover_state":"RegionFailoverEnd"}}"# - ); - - // Verifies that the failed region (region 1) is moved from failed datanode (datanode 1) to the candidate datanode. - let region_distribution = env - .context - .table_metadata_manager - .table_route_manager() - .get_region_distribution(1) - .await - .unwrap() - .unwrap(); - assert_eq!( - region_distribution.get(&failed_region.datanode_id).unwrap(), - &vec![2] - ); - assert!(region_distribution - .get(&candidate_rx.recv().await.unwrap()) - .unwrap() - .contains(&1)); - } - - #[tokio::test] - async fn test_state_serde() { - let env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let state = RegionFailoverStart::new(); - let node = Node { - lock_meta: LockMeta { - catalog: "greptime".into(), - schema: "public".into(), - }, - failed_region, - state: Box::new(state), - }; - let procedure = RegionFailoverProcedure { - node, - context: env.context, - }; - - let s = procedure.dump().unwrap(); - assert_eq!( - s, - r#"{"lock_meta":{"catalog":"greptime","schema":"public"},"failed_region":{"cluster_id":0,"datanode_id":1,"table_id":1,"region_number":1,"engine":"mito2"},"state":{"region_failover_state":"RegionFailoverStart","failover_candidate":null}}"#, - ); - let n: Node = serde_json::from_str(&s).unwrap(); - assert_eq!( - format!("{n:?}"), - r#"Node { lock_meta: LockMeta { catalog: "greptime", schema: "public" }, failed_region: RegionIdent { cluster_id: 0, datanode_id: 1, table_id: 1, region_number: 1, engine: "mito2" }, state: RegionFailoverStart { failover_candidate: None } }"#, - ); - } - - #[tokio::test] - async fn test_state_not_changed_upon_failure() { - struct MySelector { - peers: Arc<Mutex<Vec<Option<Peer>>>>, - } - - #[async_trait] - impl Selector for MySelector { - type Context = SelectorContext; - type Output = Vec<Peer>; - - async fn select( - &self, - _ns: Namespace, - _ctx: &Self::Context, - _opts: SelectorOptions, - ) -> Result<Self::Output> { - let mut peers = self.peers.lock().unwrap(); - Ok(if let Some(Some(peer)) = peers.pop() { - vec![peer] - } else { - vec![] - }) - } - } - - // Returns a valid peer the second time called "select". 
- let selector = MySelector { - peers: Arc::new(Mutex::new(vec![ - Some(Peer { - id: 42, - addr: String::default(), - }), - None, - ])), - }; - - let env = TestingEnvBuilder::new() - .with_selector(Arc::new(selector)) - .build() - .await; - let failed_region = env.failed_region(1).await; - - let state = RegionFailoverStart::new(); - let node = Node { - lock_meta: LockMeta { - catalog: "greptime".into(), - schema: "public".into(), - }, - failed_region, - state: Box::new(state), - }; - let mut procedure = RegionFailoverProcedure { - node, - context: env.context, - }; - - let ctx = ProcedureContext { - procedure_id: ProcedureId::random(), - provider: Arc::new(MockContextProvider::default()), - }; - - let result = procedure.execute(&ctx).await; - assert!(result.is_err()); - let err = result.unwrap_err(); - assert!(err.is_retry_later(), "err: {:?}", err); - assert_eq!( - r#"{"region_failover_state":"RegionFailoverStart","failover_candidate":null}"#, - serde_json::to_string(&procedure.node.state).unwrap() - ); - - let result = procedure.execute(&ctx).await; - assert!(matches!(result, Ok(Status::Executing { persist: true }))); - assert_eq!( - r#"{"region_failover_state":"DeactivateRegion","candidate":{"id":42,"addr":""}}"#, - serde_json::to_string(&procedure.node.state).unwrap() - ); - } -} diff --git a/src/meta-srv/src/procedure/region_failover/activate_region.rs b/src/meta-srv/src/procedure/region_failover/activate_region.rs deleted file mode 100644 index a2b1c8fd9303..000000000000 --- a/src/meta-srv/src/procedure/region_failover/activate_region.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::collections::HashMap; -use std::time::Duration; - -use api::v1::meta::MailboxMessage; -use async_trait::async_trait; -use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply}; -use common_meta::key::datanode_table::{DatanodeTableKey, RegionInfo}; -use common_meta::peer::Peer; -use common_meta::RegionIdent; -use common_telemetry::{debug, info}; -use serde::{Deserialize, Serialize}; -use snafu::{OptionExt, ResultExt}; -use store_api::storage::RegionNumber; - -use super::update_metadata::UpdateRegionMetadata; -use super::{RegionFailoverContext, State}; -use crate::error::{ - self, Error, Result, RetryLaterSnafu, SerializeToJsonSnafu, UnexpectedInstructionReplySnafu, -}; -use crate::handler::HeartbeatMailbox; -use crate::procedure::region_failover::OPEN_REGION_MESSAGE_TIMEOUT; -use crate::service::mailbox::{Channel, MailboxReceiver}; - -#[derive(Serialize, Deserialize, Debug)] -pub(super) struct ActivateRegion { - candidate: Peer, - // If the meta leader node dies during the execution of the procedure, - // the new leader node needs to remark the failed region as "inactive" - // to prevent it from renewing the lease. - remark_inactive_region: bool, - // An `None` option stands for uninitialized. 
- region_storage_path: Option<String>, - region_options: Option<HashMap<String, String>>, - region_wal_options: Option<HashMap<RegionNumber, String>>, -} - -impl ActivateRegion { - pub(super) fn new(candidate: Peer) -> Self { - Self { - candidate, - remark_inactive_region: false, - region_storage_path: None, - region_options: None, - region_wal_options: None, - } - } - - async fn send_open_region_message( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - timeout: Duration, - ) -> Result<MailboxReceiver> { - let table_id = failed_region.table_id; - // Retrieves the wal options from failed datanode table value. - let datanode_table_value = ctx - .table_metadata_manager - .datanode_table_manager() - .get(&DatanodeTableKey::new(failed_region.datanode_id, table_id)) - .await - .context(error::TableMetadataManagerSnafu)? - .context(error::DatanodeTableNotFoundSnafu { - table_id, - datanode_id: failed_region.datanode_id, - })?; - - let candidate_ident = RegionIdent { - datanode_id: self.candidate.id, - ..failed_region.clone() - }; - info!("Activating region: {candidate_ident:?}"); - - let RegionInfo { - region_storage_path, - region_options, - region_wal_options, - .. - } = datanode_table_value.region_info; - - let instruction = Instruction::OpenRegion(OpenRegion::new( - candidate_ident.clone(), - &region_storage_path, - region_options.clone(), - region_wal_options.clone(), - false, - )); - - self.region_storage_path = Some(region_storage_path); - self.region_options = Some(region_options); - self.region_wal_options = Some(region_wal_options); - - let msg = MailboxMessage::json_message( - "Activate Region", - &format!("Metasrv@{}", ctx.selector_ctx.server_addr), - &format!( - "Datanode-(id={}, addr={})", - self.candidate.id, self.candidate.addr - ), - common_time::util::current_time_millis(), - &instruction, - ) - .with_context(|_| SerializeToJsonSnafu { - input: instruction.to_string(), - })?; - - let ch = Channel::Datanode(self.candidate.id); - ctx.mailbox.send(&ch, msg, timeout).await - } - - async fn handle_response( - &mut self, - mailbox_receiver: MailboxReceiver, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - match mailbox_receiver.await? { - Ok(msg) => { - debug!("Received activate region reply: {msg:?}"); - - let reply = HeartbeatMailbox::json_reply(&msg)?; - let InstructionReply::OpenRegion(SimpleReply { result, error }) = reply else { - return UnexpectedInstructionReplySnafu { - mailbox_message: msg.to_string(), - reason: "expect open region reply", - } - .fail(); - }; - if result { - Ok(Box::new(UpdateRegionMetadata::new( - self.candidate.clone(), - self.region_storage_path - .clone() - .context(error::UnexpectedSnafu { - violated: "expected region_storage_path", - })?, - self.region_options - .clone() - .context(error::UnexpectedSnafu { - violated: "expected region_options", - })?, - self.region_wal_options - .clone() - .context(error::UnexpectedSnafu { - violated: "expected region_wal_options", - })?, - ))) - } else { - // The region could be just indeed cannot be opened by the candidate, retry - // would be in vain. Then why not just end the failover procedure? Because we - // currently lack the methods or any maintenance tools to manage the whole - // procedures things, it would be easier to let the procedure keep running. - let reason = format!( - "Region {failed_region:?} is not opened by Datanode {:?}, error: {error:?}", - self.candidate, - ); - RetryLaterSnafu { reason }.fail() - } - } - Err(Error::MailboxTimeout { .. 
}) => { - let reason = format!( - "Mailbox received timeout for activate failed region {failed_region:?} on Datanode {:?}", - self.candidate, - ); - RetryLaterSnafu { reason }.fail() - } - Err(e) => Err(e), - } - } -} - -#[async_trait] -#[typetag::serde] -impl State for ActivateRegion { - async fn next( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - let mailbox_receiver = self - .send_open_region_message(ctx, failed_region, OPEN_REGION_MESSAGE_TIMEOUT) - .await?; - - self.handle_response(mailbox_receiver, failed_region).await - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - - use api::v1::meta::mailbox_message::Payload; - use common_meta::instruction::SimpleReply; - - use super::super::tests::TestingEnvBuilder; - use super::*; - - #[tokio::test] - async fn test_activate_region_success() { - common_telemetry::init_default_ut_logging(); - - let mut env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let candidate = 2; - let mut state = ActivateRegion::new(Peer::new(candidate, "")); - let mailbox_receiver = state - .send_open_region_message(&env.context, &failed_region, Duration::from_millis(100)) - .await - .unwrap(); - - let message_id = mailbox_receiver.message_id(); - - // verify that the open region message is sent - let rx = env.heartbeat_receivers.get_mut(&candidate).unwrap(); - let resp = rx.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - assert_eq!(received.id, message_id); - assert_eq!(received.subject, "Activate Region"); - assert_eq!(received.from, "[email protected]:3002"); - assert_eq!(received.to, "Datanode-(id=2, addr=)"); - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::OpenRegion(OpenRegion::new( - RegionIdent { - datanode_id: candidate, - ..failed_region.clone() - }, - &env.path, - HashMap::new(), - HashMap::new(), - false - ))) - .unwrap(), - )) - ); - - // simulating response from Datanode - env.context - .mailbox - .on_recv( - message_id, - Ok(MailboxMessage { - id: message_id, - subject: "Activate Region".to_string(), - from: "Datanode-2".to_string(), - to: "Metasrv".to_string(), - timestamp_millis: common_time::util::current_time_millis(), - payload: Some(Payload::Json( - serde_json::to_string(&InstructionReply::OpenRegion(SimpleReply { - result: true, - error: None, - })) - .unwrap(), - )), - }), - ) - .await - .unwrap(); - - let next_state = state - .handle_response(mailbox_receiver, &failed_region) - .await - .unwrap(); - assert_eq!( - format!("{next_state:?}"), - r#"UpdateRegionMetadata { candidate: Peer { id: 2, addr: "" }, region_storage_path: "greptime/public", region_options: {}, region_wal_options: {} }"# - ); - } - - #[tokio::test] - async fn test_activate_region_timeout() { - common_telemetry::init_default_ut_logging(); - - let mut env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let candidate = 2; - let mut state = ActivateRegion::new(Peer::new(candidate, "")); - let mailbox_receiver = state - .send_open_region_message(&env.context, &failed_region, Duration::from_millis(100)) - .await - .unwrap(); - - // verify that the open region message is sent - let rx = env.heartbeat_receivers.get_mut(&candidate).unwrap(); - let resp = rx.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - assert_eq!(received.id, mailbox_receiver.message_id()); - assert_eq!(received.subject, 
"Activate Region"); - assert_eq!(received.from, "[email protected]:3002"); - assert_eq!(received.to, "Datanode-(id=2, addr=)"); - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::OpenRegion(OpenRegion::new( - RegionIdent { - datanode_id: candidate, - ..failed_region.clone() - }, - &env.path, - HashMap::new(), - HashMap::new(), - false - ))) - .unwrap(), - )) - ); - - let result = state - .handle_response(mailbox_receiver, &failed_region) - .await; - assert!(matches!(result, Err(Error::RetryLater { .. }))); - } -} diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs deleted file mode 100644 index d6e2c088945c..000000000000 --- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::time::Duration; - -use api::v1::meta::MailboxMessage; -use async_trait::async_trait; -use common_meta::instruction::{Instruction, InstructionReply, SimpleReply}; -use common_meta::peer::Peer; -use common_meta::rpc::router::RegionStatus; -use common_meta::RegionIdent; -use common_telemetry::{debug, info, warn}; -use serde::{Deserialize, Serialize}; -use snafu::{OptionExt, ResultExt}; - -use super::activate_region::ActivateRegion; -use super::{RegionFailoverContext, State}; -use crate::error::{ - self, Error, Result, RetryLaterSnafu, SerializeToJsonSnafu, UnexpectedInstructionReplySnafu, -}; -use crate::handler::HeartbeatMailbox; -use crate::service::mailbox::{Channel, MailboxReceiver}; - -#[derive(Serialize, Deserialize, Debug)] -pub(super) struct DeactivateRegion { - candidate: Peer, -} - -impl DeactivateRegion { - pub(super) fn new(candidate: Peer) -> Self { - Self { candidate } - } - - async fn mark_leader_downgraded( - &self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<()> { - let table_id = failed_region.table_id; - - let table_route_value = ctx - .table_metadata_manager - .table_route_manager() - .table_route_storage() - .get_raw(table_id) - .await - .context(error::TableMetadataManagerSnafu)? 
- .context(error::TableRouteNotFoundSnafu { table_id })?; - - ctx.table_metadata_manager - .update_leader_region_status(table_id, &table_route_value, |region| { - if region.region.id.region_number() == failed_region.region_number { - Some(Some(RegionStatus::Downgraded)) - } else { - None - } - }) - .await - .context(error::UpdateTableRouteSnafu)?; - - Ok(()) - } - - async fn send_close_region_message( - &self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<MailboxReceiver> { - let instruction = Instruction::CloseRegion(failed_region.clone()); - - let msg = MailboxMessage::json_message( - "Deactivate Region", - &format!("Metasrv@{}", ctx.selector_ctx.server_addr), - &format!("Datanode-{}", failed_region.datanode_id), - common_time::util::current_time_millis(), - &instruction, - ) - .with_context(|_| SerializeToJsonSnafu { - input: instruction.to_string(), - })?; - - let ch = Channel::Datanode(failed_region.datanode_id); - let timeout = Duration::from_secs(ctx.region_lease_secs); - ctx.mailbox.send(&ch, msg, timeout).await - } - - async fn handle_response( - &self, - _ctx: &RegionFailoverContext, - mailbox_receiver: MailboxReceiver, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - match mailbox_receiver.await? { - Ok(msg) => { - debug!("Received deactivate region reply: {msg:?}"); - - let reply = HeartbeatMailbox::json_reply(&msg)?; - let InstructionReply::CloseRegion(SimpleReply { result, error }) = reply else { - return UnexpectedInstructionReplySnafu { - mailbox_message: msg.to_string(), - reason: "expect close region reply", - } - .fail(); - }; - if result { - Ok(Box::new(ActivateRegion::new(self.candidate.clone()))) - } else { - // Under rare circumstances would a Datanode fail to close a Region. - // So simply retry. - let reason = format!( - "Region {failed_region:?} is not closed by Datanode {}, error: {error:?}", - failed_region.datanode_id, - ); - RetryLaterSnafu { reason }.fail() - } - } - Err(Error::MailboxTimeout { .. }) => { - // We have configured the timeout to match the region lease timeout before making - // the call and have disabled region lease renewal. Therefore, if a timeout error - // occurs, it can be concluded that the region has been closed. With this information, - // we can proceed confidently to the next step. - Ok(Box::new(ActivateRegion::new(self.candidate.clone()))) - } - Err(e) => Err(e), - } - } - - /// Sleep for `region_lease_expiry_seconds`, to make sure the region is closed (by its - /// region alive keeper). This is critical for region not being opened in multiple Datanodes - /// simultaneously. - async fn wait_for_region_lease_expiry(&self, ctx: &RegionFailoverContext) { - tokio::time::sleep(Duration::from_secs(ctx.region_lease_secs)).await; - } -} - -#[async_trait] -#[typetag::serde] -impl State for DeactivateRegion { - async fn next( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - info!("Deactivating region: {failed_region:?}"); - self.mark_leader_downgraded(ctx, failed_region).await?; - let result = self.send_close_region_message(ctx, failed_region).await; - let mailbox_receiver = match result { - Ok(mailbox_receiver) => mailbox_receiver, - Err(Error::PusherNotFound { .. }) => { - warn!( - "Datanode {} is not reachable, skip deactivating region {}, just wait for the region lease to expire", - failed_region.datanode_id, failed_region - ); - // See the mailbox received timeout situation comments above. 
- self.wait_for_region_lease_expiry(ctx).await; - return Ok(Box::new(ActivateRegion::new(self.candidate.clone()))); - } - Err(e) => return Err(e), - }; - - self.handle_response(ctx, mailbox_receiver, failed_region) - .await - } -} - -#[cfg(test)] -mod tests { - use api::v1::meta::mailbox_message::Payload; - use common_meta::instruction::SimpleReply; - - use super::super::tests::TestingEnvBuilder; - use super::*; - - #[tokio::test] - async fn test_mark_leader_downgraded() { - common_telemetry::init_default_ut_logging(); - - let env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let state = DeactivateRegion::new(Peer::new(2, "")); - - state - .mark_leader_downgraded(&env.context, &failed_region) - .await - .unwrap(); - - let table_id = failed_region.table_id; - - let table_route_value = env - .context - .table_metadata_manager - .table_route_manager() - .table_route_storage() - .get(table_id) - .await - .unwrap() - .unwrap(); - - let should_downgraded = table_route_value - .region_routes() - .unwrap() - .iter() - .find(|route| route.region.id.region_number() == failed_region.region_number) - .unwrap(); - - assert!(should_downgraded.is_leader_downgraded()); - } - - #[tokio::test] - async fn test_deactivate_region_success() { - common_telemetry::init_default_ut_logging(); - - let mut env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let state = DeactivateRegion::new(Peer::new(2, "")); - let mailbox_receiver = state - .send_close_region_message(&env.context, &failed_region) - .await - .unwrap(); - - let message_id = mailbox_receiver.message_id(); - - // verify that the close region message is sent - let rx = env - .heartbeat_receivers - .get_mut(&failed_region.datanode_id) - .unwrap(); - let resp = rx.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - assert_eq!(received.id, message_id); - assert_eq!(received.subject, "Deactivate Region"); - assert_eq!(received.from, "[email protected]:3002"); - assert_eq!(received.to, "Datanode-1"); - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::CloseRegion(failed_region.clone())).unwrap(), - )) - ); - - // simulating response from Datanode - env.context - .mailbox - .on_recv( - message_id, - Ok(MailboxMessage { - id: message_id, - subject: "Deactivate Region".to_string(), - from: "Datanode-1".to_string(), - to: "Metasrv".to_string(), - timestamp_millis: common_time::util::current_time_millis(), - payload: Some(Payload::Json( - serde_json::to_string(&InstructionReply::CloseRegion(SimpleReply { - result: true, - error: None, - })) - .unwrap(), - )), - }), - ) - .await - .unwrap(); - - let next_state = state - .handle_response(&env.context, mailbox_receiver, &failed_region) - .await - .unwrap(); - assert_eq!( - format!("{next_state:?}"), - r#"ActivateRegion { candidate: Peer { id: 2, addr: "" }, remark_inactive_region: false, region_storage_path: None, region_options: None, region_wal_options: None }"# - ); - } - - #[tokio::test] - async fn test_deactivate_region_timeout() { - common_telemetry::init_default_ut_logging(); - - let mut env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let state = DeactivateRegion::new(Peer::new(2, "")); - let mailbox_receiver = state - .send_close_region_message(&env.context, &failed_region) - .await - .unwrap(); - - // verify that the open region message is sent - let rx = env - .heartbeat_receivers - 
.get_mut(&failed_region.datanode_id) - .unwrap(); - let resp = rx.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - assert_eq!(received.id, mailbox_receiver.message_id()); - assert_eq!(received.subject, "Deactivate Region"); - assert_eq!(received.from, "[email protected]:3002"); - assert_eq!(received.to, "Datanode-1"); - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::CloseRegion(failed_region.clone())).unwrap(), - )) - ); - - let next_state = state - .handle_response(&env.context, mailbox_receiver, &failed_region) - .await - .unwrap(); - // Timeout or not, proceed to `ActivateRegion`. - assert_eq!( - format!("{next_state:?}"), - r#"ActivateRegion { candidate: Peer { id: 2, addr: "" }, remark_inactive_region: false, region_storage_path: None, region_options: None, region_wal_options: None }"# - ); - } -} diff --git a/src/meta-srv/src/procedure/region_failover/failover_end.rs b/src/meta-srv/src/procedure/region_failover/failover_end.rs deleted file mode 100644 index 48d0a1fa1826..000000000000 --- a/src/meta-srv/src/procedure/region_failover/failover_end.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use async_trait::async_trait; -use common_meta::RegionIdent; -use common_procedure::Status; -use serde::{Deserialize, Serialize}; - -use super::{RegionFailoverContext, State}; -use crate::error::Result; - -#[derive(Serialize, Deserialize, Debug)] -pub(super) struct RegionFailoverEnd; - -#[async_trait] -#[typetag::serde] -impl State for RegionFailoverEnd { - async fn next(&mut self, _: &RegionFailoverContext, _: &RegionIdent) -> Result<Box<dyn State>> { - Ok(Box::new(RegionFailoverEnd)) - } - - fn status(&self) -> Status { - Status::done() - } -} diff --git a/src/meta-srv/src/procedure/region_failover/failover_start.rs b/src/meta-srv/src/procedure/region_failover/failover_start.rs deleted file mode 100644 index aaa1c9949857..000000000000 --- a/src/meta-srv/src/procedure/region_failover/failover_start.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use async_trait::async_trait; -use common_error::ext::{BoxedError, ErrorExt}; -use common_error::status_code::StatusCode; -use common_meta::peer::Peer; -use common_meta::RegionIdent; -use common_telemetry::info; -use serde::{Deserialize, Serialize}; -use snafu::{ensure, location, Location}; - -use super::deactivate_region::DeactivateRegion; -use super::{RegionFailoverContext, State}; -use crate::error::{self, RegionFailoverCandidatesNotFoundSnafu, Result}; -use crate::selector::SelectorOptions; - -#[derive(Serialize, Deserialize, Debug)] -pub(super) struct RegionFailoverStart { - failover_candidate: Option<Peer>, -} - -impl RegionFailoverStart { - pub(super) fn new() -> Self { - Self { - failover_candidate: None, - } - } - - async fn choose_candidate( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Peer> { - if let Some(candidate) = self.failover_candidate.clone() { - return Ok(candidate); - } - - let mut selector_ctx = ctx.selector_ctx.clone(); - selector_ctx.table_id = Some(failed_region.table_id); - - let cluster_id = failed_region.cluster_id; - let opts = SelectorOptions::default(); - let candidates = ctx - .selector - .select(cluster_id, &selector_ctx, opts) - .await? - .iter() - .filter_map(|p| { - if p.id != failed_region.datanode_id { - Some(p.clone()) - } else { - None - } - }) - .collect::<Vec<Peer>>(); - ensure!( - !candidates.is_empty(), - RegionFailoverCandidatesNotFoundSnafu { - failed_region: format!("{failed_region:?}"), - } - ); - - // Safety: indexing is guarded by the "ensure!" above. - let candidate = &candidates[0]; - self.failover_candidate = Some(candidate.clone()); - info!("Choose failover candidate datanode {candidate:?} for region: {failed_region}"); - Ok(candidate.clone()) - } -} - -#[async_trait] -#[typetag::serde] -impl State for RegionFailoverStart { - async fn next( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - let candidate = self - .choose_candidate(ctx, failed_region) - .await - .map_err(|e| { - if e.status_code() == StatusCode::RuntimeResourcesExhausted { - error::Error::RetryLaterWithSource { - reason: format!("Region failover aborted for {failed_region:?}"), - location: location!(), - source: BoxedError::new(e), - } - } else { - e - } - })?; - return Ok(Box::new(DeactivateRegion::new(candidate))); - } -} - -#[cfg(test)] -mod tests { - use super::super::tests::TestingEnvBuilder; - use super::*; - - #[tokio::test] - async fn test_choose_failover_candidate() { - common_telemetry::init_default_ut_logging(); - - let env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let mut state = RegionFailoverStart::new(); - assert!(state.failover_candidate.is_none()); - - let candidate = state - .choose_candidate(&env.context, &failed_region) - .await - .unwrap(); - assert_ne!(candidate.id, failed_region.datanode_id); - - let candidate_again = state - .choose_candidate(&env.context, &failed_region) - .await - .unwrap(); - assert_eq!(candidate, candidate_again); - } -} diff --git a/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs b/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs deleted file mode 100644 index d7231abfc834..000000000000 --- a/src/meta-srv/src/procedure/region_failover/invalidate_cache.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use api::v1::meta::MailboxMessage; -use async_trait::async_trait; -use common_meta::instruction::{CacheIdent, Instruction}; -use common_meta::RegionIdent; -use common_telemetry::info; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; -use table::metadata::TableId; - -use super::failover_end::RegionFailoverEnd; -use super::{RegionFailoverContext, State}; -use crate::error::{self, Result}; -use crate::service::mailbox::BroadcastChannel; - -#[derive(Serialize, Deserialize, Debug, Default)] -pub(super) struct InvalidateCache; - -impl InvalidateCache { - async fn broadcast_invalidate_table_cache_messages( - &self, - ctx: &RegionFailoverContext, - table_id: TableId, - ) -> Result<()> { - let instruction = Instruction::InvalidateCaches(vec![CacheIdent::TableId(table_id)]); - - let msg = &MailboxMessage::json_message( - "Invalidate Table Cache", - &format!("Metasrv@{}", ctx.selector_ctx.server_addr), - "Frontend broadcast", - common_time::util::current_time_millis(), - &instruction, - ) - .with_context(|_| error::SerializeToJsonSnafu { - input: instruction.to_string(), - })?; - - ctx.mailbox - .broadcast(&BroadcastChannel::Frontend, msg) - .await - } -} - -#[async_trait] -#[typetag::serde] -impl State for InvalidateCache { - async fn next( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - let table_id = failed_region.table_id; - info!( - "Broadcast invalidate table({}) cache message to frontend", - table_id - ); - self.broadcast_invalidate_table_cache_messages(ctx, table_id) - .await?; - - Ok(Box::new(RegionFailoverEnd)) - } -} - -#[cfg(test)] -mod tests { - use api::v1::meta::mailbox_message::Payload; - use api::v1::meta::RequestHeader; - - use super::super::tests::TestingEnvBuilder; - use super::*; - use crate::handler::Pusher; - use crate::procedure::region_failover::tests::TestingEnv; - use crate::service::mailbox::Channel; - - #[tokio::test] - async fn test_invalidate_table_cache() { - common_telemetry::init_default_ut_logging(); - - let env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let TestingEnv { - mut heartbeat_receivers, - context, - pushers, - .. 
- } = env; - - for frontend_id in 4..=7 { - let (tx, rx) = tokio::sync::mpsc::channel(1); - - let pusher_id = Channel::Frontend(frontend_id).pusher_id(); - let pusher = Pusher::new(tx, &RequestHeader::default()); - let _ = pushers.insert(pusher_id, pusher).await; - - let _ = heartbeat_receivers.insert(frontend_id, rx); - } - - let table_id = failed_region.table_id; - - // lexicographical order - // frontend-4,5,6,7 - let next_state = InvalidateCache - .next(&context, &failed_region) - .await - .unwrap(); - assert_eq!(format!("{next_state:?}"), "RegionFailoverEnd"); - - for i in 4..=7 { - // frontend id starts from 4 - let rx = heartbeat_receivers.get_mut(&i).unwrap(); - let resp = rx.recv().await.unwrap().unwrap(); - let received = &resp.mailbox_message.unwrap(); - - assert_eq!(received.id, 0); - assert_eq!(received.subject, "Invalidate Table Cache"); - assert_eq!(received.from, "[email protected]:3002"); - assert_eq!(received.to, "Frontend broadcast"); - - assert_eq!( - received.payload, - Some(Payload::Json( - serde_json::to_string(&Instruction::InvalidateCaches(vec![ - CacheIdent::TableId(table_id) - ])) - .unwrap(), - )) - ); - } - } -} diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs deleted file mode 100644 index 6302d20eee73..000000000000 --- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::collections::HashMap; - -use async_trait::async_trait; -use common_error::ext::BoxedError; -use common_meta::key::datanode_table::RegionInfo; -use common_meta::key::table_route::TableRouteKey; -use common_meta::peer::Peer; -use common_meta::rpc::router::RegionRoute; -use common_meta::RegionIdent; -use common_telemetry::info; -use serde::{Deserialize, Serialize}; -use snafu::{OptionExt, ResultExt}; -use store_api::storage::RegionNumber; - -use super::invalidate_cache::InvalidateCache; -use super::{RegionFailoverContext, State}; -use crate::error::{self, Result, TableRouteNotFoundSnafu}; -use crate::lock::keys::table_metadata_lock_key; -use crate::lock::Opts; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub(super) struct UpdateRegionMetadata { - candidate: Peer, - region_storage_path: String, - region_options: HashMap<String, String>, - #[serde(default)] - region_wal_options: HashMap<RegionNumber, String>, -} - -impl UpdateRegionMetadata { - pub(super) fn new( - candidate: Peer, - region_storage_path: String, - region_options: HashMap<String, String>, - region_wal_options: HashMap<RegionNumber, String>, - ) -> Self { - Self { - candidate, - region_storage_path, - region_options, - region_wal_options, - } - } - - /// Updates the metadata of the table. 
- async fn update_metadata( - &self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<()> { - let key = table_metadata_lock_key(failed_region); - let key = ctx.dist_lock.lock(key, Opts::default()).await?; - - self.update_table_route(ctx, failed_region).await?; - - ctx.dist_lock.unlock(key).await?; - Ok(()) - } - - async fn update_table_route( - &self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<()> { - let table_id = failed_region.table_id; - let engine = &failed_region.engine; - - let table_route_value = ctx - .table_metadata_manager - .table_route_manager() - .table_route_storage() - .get_raw(table_id) - .await - .context(error::TableMetadataManagerSnafu)? - .context(TableRouteNotFoundSnafu { table_id })?; - - let mut new_region_routes = table_route_value - .region_routes() - .context(error::UnexpectedLogicalRouteTableSnafu { - err_msg: format!("{self:?} is a non-physical TableRouteValue."), - })? - .clone(); - - for region_route in new_region_routes.iter_mut() { - if region_route.region.id.region_number() == failed_region.region_number { - region_route.leader_peer = Some(self.candidate.clone()); - region_route.set_leader_status(None); - break; - } - } - - pretty_log_table_route_change( - TableRouteKey::new(table_id).to_string(), - &new_region_routes, - failed_region, - ); - - ctx.table_metadata_manager - .update_table_route( - table_id, - RegionInfo { - engine: engine.to_string(), - region_storage_path: self.region_storage_path.to_string(), - region_options: self.region_options.clone(), - region_wal_options: self.region_wal_options.clone(), - }, - &table_route_value, - new_region_routes, - &self.region_options, - &self.region_wal_options, - ) - .await - .context(error::UpdateTableRouteSnafu)?; - - Ok(()) - } -} - -fn pretty_log_table_route_change( - key: String, - region_routes: &[RegionRoute], - failed_region: &RegionIdent, -) { - let region_routes = region_routes - .iter() - .map(|x| { - format!( - "{{region: {}, leader: {}, followers: [{}]}}", - x.region.id, - x.leader_peer - .as_ref() - .map(|p| p.id.to_string()) - .unwrap_or_else(|| "?".to_string()), - x.follower_peers - .iter() - .map(|p| p.id.to_string()) - .collect::<Vec<_>>() - .join(","), - ) - }) - .collect::<Vec<_>>(); - - info!( - "Updating region routes in table route value (key = '{}') to [{}]. 
\ - Failed region {} was on Datanode {}.", - key, - region_routes.join(", "), - failed_region.region_number, - failed_region.datanode_id, - ); -} - -#[async_trait] -#[typetag::serde] -impl State for UpdateRegionMetadata { - async fn next( - &mut self, - ctx: &RegionFailoverContext, - failed_region: &RegionIdent, - ) -> Result<Box<dyn State>> { - self.update_metadata(ctx, failed_region) - .await - .map_err(BoxedError::new) - .context(error::RetryLaterWithSourceSnafu { - reason: format!( - "Failed to update metadata for failed region: {}", - failed_region - ), - })?; - Ok(Box::new(InvalidateCache)) - } -} - -#[cfg(test)] -mod tests { - - use common_meta::rpc::router::{extract_all_peers, region_distribution}; - use futures::TryStreamExt; - - use super::super::tests::{TestingEnv, TestingEnvBuilder}; - use super::{State, *}; - use crate::test_util::new_region_route; - - #[tokio::test] - async fn test_next_state() { - let env = TestingEnvBuilder::new().build().await; - let failed_region = env.failed_region(1).await; - - let mut state = UpdateRegionMetadata::new( - Peer::new(2, ""), - env.path.clone(), - HashMap::new(), - HashMap::new(), - ); - - let next_state = state.next(&env.context, &failed_region).await.unwrap(); - assert_eq!(format!("{next_state:?}"), "InvalidateCache"); - } - - #[tokio::test] - async fn test_update_table_route() { - common_telemetry::init_default_ut_logging(); - - async fn test(env: TestingEnv, failed_region: u32, candidate: u64) -> Vec<RegionRoute> { - let failed_region = env.failed_region(failed_region).await; - - let state = UpdateRegionMetadata::new( - Peer::new(candidate, ""), - env.path.clone(), - HashMap::new(), - HashMap::new(), - ); - state - .update_table_route(&env.context, &failed_region) - .await - .unwrap(); - - let table_id = failed_region.table_id; - - env.context - .table_metadata_manager - .table_route_manager() - .table_route_storage() - .get_raw(table_id) - .await - .unwrap() - .unwrap() - .into_inner() - .region_routes() - .unwrap() - .clone() - } - - // Original region routes: - // region number => leader node - // 1 => 1 - // 2 => 1 - // 3 => 2 - // 4 => 3 - - // Testing failed region 1 moves to Datanode 2. - let env = TestingEnvBuilder::new().build().await; - let actual = test(env, 1, 2).await; - - // Expected region routes: - // region number => leader node - // 1 => 2 - // 2 => 1 - // 3 => 2 - // 4 => 3 - let peers = &extract_all_peers(&actual); - assert_eq!(peers.len(), 3); - let expected = vec![ - new_region_route(1, peers, 2), - new_region_route(2, peers, 1), - new_region_route(3, peers, 2), - new_region_route(4, peers, 3), - ]; - assert_eq!(actual, expected); - - // Testing failed region 3 moves to Datanode 3. - let env = TestingEnvBuilder::new().build().await; - let actual = test(env, 3, 3).await; - - // Expected region routes: - // region number => leader node - // 1 => 1 - // 2 => 1 - // 3 => 3 - // 4 => 3 - let peers = &extract_all_peers(&actual); - assert_eq!(peers.len(), 2); - let expected = vec![ - new_region_route(1, peers, 1), - new_region_route(2, peers, 1), - new_region_route(3, peers, 3), - new_region_route(4, peers, 3), - ]; - assert_eq!(actual, expected); - - // Testing failed region 1 moves to a new Datanode, 4. 
- let env = TestingEnvBuilder::new().build().await; - let actual = test(env, 1, 4).await; - - // Expected region routes: - // region number => leader node - // 1 => 4 - // 2 => 1 - // 3 => 2 - // 4 => 3 - let peers = &extract_all_peers(&actual); - assert_eq!(peers.len(), 4); - let expected = vec![ - new_region_route(1, peers, 4), - new_region_route(2, peers, 1), - new_region_route(3, peers, 2), - new_region_route(4, peers, 3), - ]; - assert_eq!(actual, expected); - - // Testing failed region 3 moves to a new Datanode, 4. - let env = TestingEnvBuilder::new().build().await; - let actual = test(env, 3, 4).await; - - // Expected region routes: - // region number => leader node - // 1 => 1 - // 2 => 1 - // 3 => 4 - // 4 => 3 - let peers = &extract_all_peers(&actual); - assert_eq!(peers.len(), 3); - let expected = vec![ - new_region_route(1, peers, 1), - new_region_route(2, peers, 1), - new_region_route(3, peers, 4), - new_region_route(4, peers, 3), - ]; - assert_eq!(actual, expected); - } - - #[tokio::test(flavor = "multi_thread")] - async fn test_update_metadata_concurrently() { - common_telemetry::init_default_ut_logging(); - - // Test the correctness of concurrently updating the region distribution in table region - // value, and region routes in table route value. Region 1 moves to Datanode 2; region 2 - // moves to Datanode 3. - // - // Datanode => Regions - // Before: | After: - // 1 => 1, 2 | - // 2 => 3 | 2 => 3, 1 - // 3 => 4 | 3 => 4, 2 - // - // region number => leader node - // Before: | After: - // 1 => 1 | 1 => 2 - // 2 => 1 | 2 => 3 - // 3 => 2 | 3 => 2 - // 4 => 3 | 4 => 3 - // - // Test case runs 10 times to enlarge the possibility of concurrent updating. - for _ in 0..10 { - let env = TestingEnvBuilder::new().build().await; - - let ctx_1 = env.context.clone(); - let ctx_2 = env.context.clone(); - - let failed_region_1 = env.failed_region(1).await; - let failed_region_2 = env.failed_region(2).await; - - let table_id = failed_region_1.table_id; - let path = env.path.clone(); - let _ = futures::future::join_all(vec![ - tokio::spawn(async move { - let state = UpdateRegionMetadata::new( - Peer::new(2, ""), - path, - HashMap::new(), - HashMap::new(), - ); - state - .update_metadata(&ctx_1, &failed_region_1) - .await - .unwrap(); - }), - tokio::spawn(async move { - let state = UpdateRegionMetadata::new( - Peer::new(3, ""), - env.path.clone(), - HashMap::new(), - HashMap::new(), - ); - state - .update_metadata(&ctx_2, &failed_region_2) - .await - .unwrap(); - }), - ]) - .await; - - let table_route_value = env - .context - .table_metadata_manager - .table_route_manager() - .table_route_storage() - .get(table_id) - .await - .unwrap() - .unwrap(); - - let peers = &extract_all_peers(table_route_value.region_routes().unwrap()); - let actual = table_route_value.region_routes().unwrap(); - let expected = &vec![ - new_region_route(1, peers, 2), - new_region_route(2, peers, 3), - new_region_route(3, peers, 2), - new_region_route(4, peers, 3), - ]; - assert_eq!(peers.len(), 2); - assert_eq!(actual, expected); - - let manager = &env.context.table_metadata_manager; - let table_route_value = manager - .table_route_manager() - .table_route_storage() - .get(table_id) - .await - .unwrap() - .unwrap(); - - let map = region_distribution(table_route_value.region_routes().unwrap()); - assert_eq!(map.len(), 2); - assert_eq!(map.get(&2), Some(&vec![1, 3])); - assert_eq!(map.get(&3), Some(&vec![2, 4])); - - // test DatanodeTableValues matches the table region distribution - let datanode_table_manager = 
manager.datanode_table_manager(); - let tables = datanode_table_manager - .tables(1) - .try_collect::<Vec<_>>() - .await - .unwrap(); - assert!(tables.is_empty()); - - let tables = datanode_table_manager - .tables(2) - .try_collect::<Vec<_>>() - .await - .unwrap(); - assert_eq!(tables.len(), 1); - assert_eq!(tables[0].table_id, 1); - assert_eq!(tables[0].regions, vec![1, 3]); - - let tables = datanode_table_manager - .tables(3) - .try_collect::<Vec<_>>() - .await - .unwrap(); - assert_eq!(tables.len(), 1); - assert_eq!(tables[0].table_id, 1); - assert_eq!(tables[0].regions, vec![2, 4]); - } - } - - #[derive(Debug, Clone, Serialize, Deserialize)] - struct LegacyUpdateRegionMetadata { - candidate: Peer, - region_storage_path: String, - region_options: HashMap<String, String>, - } - - #[test] - fn test_compatible_serialize_update_region_metadata() { - let candidate = Peer::new(1, "test_addr"); - let region_storage_path = "test_path".to_string(); - let region_options = HashMap::from([ - ("a".to_string(), "aa".to_string()), - ("b".to_string(), "bb".to_string()), - ]); - - let legacy_update_region_metadata = LegacyUpdateRegionMetadata { - candidate: candidate.clone(), - region_storage_path: region_storage_path.clone(), - region_options: region_options.clone(), - }; - - // Serialize a LegacyUpdateRegionMetadata. - let serialized = serde_json::to_string(&legacy_update_region_metadata).unwrap(); - - // Deserialize to UpdateRegionMetadata. - let deserialized = serde_json::from_str(&serialized).unwrap(); - let expected = UpdateRegionMetadata { - candidate, - region_storage_path, - region_options, - region_wal_options: HashMap::new(), - }; - assert_eq!(expected, deserialized); - } -} diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs index 9f9f119e7eca..576d1aa92365 100644 --- a/src/meta-srv/src/test_util.rs +++ b/src/meta-srv/src/test_util.rs @@ -12,22 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::HashMap; use std::sync::Arc; -use chrono::DateTime; -use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE}; -use common_meta::key::table_route::TableRouteValue; -use common_meta::key::TableMetadataManagerRef; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::peer::Peer; use common_meta::rpc::router::{Region, RegionRoute}; use common_meta::ClusterId; use common_time::util as time_util; -use datatypes::data_type::ConcreteDataType; -use datatypes::schema::{ColumnSchema, RawSchema}; -use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType}; -use table::requests::TableOptions; use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef}; use crate::key::{DatanodeLeaseKey, LeaseValue}; @@ -72,69 +63,6 @@ pub(crate) fn create_selector_context() -> SelectorContext { } } -pub(crate) async fn prepare_table_region_and_info_value( - table_metadata_manager: &TableMetadataManagerRef, - table: &str, -) { - let table_info = RawTableInfo { - ident: TableIdent::new(1), - name: table.to_string(), - desc: None, - catalog_name: DEFAULT_CATALOG_NAME.to_string(), - schema_name: DEFAULT_SCHEMA_NAME.to_string(), - meta: RawTableMeta { - schema: RawSchema::new(vec![ColumnSchema::new( - "a", - ConcreteDataType::string_datatype(), - true, - )]), - primary_key_indices: vec![], - value_indices: vec![], - engine: MITO_ENGINE.to_string(), - next_column_id: 1, - region_numbers: vec![1, 2, 3, 4], - options: TableOptions::default(), - created_on: DateTime::default(), - partition_key_indices: vec![], - }, - table_type: TableType::Base, - }; - - let region_route_factory = |region_id: u64, peer: u64| RegionRoute { - region: Region { - id: region_id.into(), - ..Default::default() - }, - leader_peer: Some(Peer { - id: peer, - addr: String::new(), - }), - follower_peers: vec![], - leader_status: None, - leader_down_since: None, - }; - - // Region distribution: - // Datanode => Regions - // 1 => 1, 2 - // 2 => 3 - // 3 => 4 - let region_routes = vec![ - region_route_factory(1, 1), - region_route_factory(2, 1), - region_route_factory(3, 2), - region_route_factory(4, 3), - ]; - table_metadata_manager - .create_table_metadata( - table_info, - TableRouteValue::physical(region_routes), - HashMap::default(), - ) - .await - .unwrap(); -} - pub(crate) async fn put_datanodes( cluster_id: ClusterId, meta_peer_client: &MetaPeerClientRef, diff --git a/tests-integration/tests/main.rs b/tests-integration/tests/main.rs index 42560e46f4cd..4fc19f24b284 100644 --- a/tests-integration/tests/main.rs +++ b/tests-integration/tests/main.rs @@ -19,10 +19,7 @@ mod http; #[macro_use] mod sql; #[macro_use] -#[allow(dead_code)] mod region_migration; -// #[macro_use] -// mod region_failover; grpc_tests!(File, S3, S3WithCache, Oss, Azblob, Gcs); http_tests!(File, S3, S3WithCache, Oss, Azblob, Gcs); diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs deleted file mode 100644 index 103b9481f8ae..000000000000 --- a/tests-integration/tests/region_failover.rs +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::sync::Arc; -use std::time::Duration; - -use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager}; -use client::OutputData; -use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; -use common_meta::key::table_route::TableRouteKey; -use common_meta::key::{MetaKey, RegionDistribution}; -use common_meta::peer::Peer; -use common_meta::{distributed_time_constants, RegionIdent}; -use common_procedure::{watcher, ProcedureWithId}; -use common_query::Output; -use common_telemetry::info; -use common_test_util::recordbatch::check_output_stream; -use frontend::error::Result as FrontendResult; -use frontend::instance::Instance; -use futures::TryStreamExt; -use meta_srv::error::Result as MetaResult; -use meta_srv::metasrv::{SelectorContext, SelectorRef}; -use meta_srv::procedure::region_failover::{RegionFailoverContext, RegionFailoverProcedure}; -use meta_srv::selector::{Namespace, Selector, SelectorOptions}; -use servers::query_handler::sql::SqlQueryHandler; -use session::context::{QueryContext, QueryContextRef}; -use table::metadata::TableId; -use tests_integration::cluster::{GreptimeDbCluster, GreptimeDbClusterBuilder}; -use tests_integration::test_util::{get_test_store_config, StorageType}; -use tokio::time; - -#[macro_export] -macro_rules! region_failover_test { - ($service:ident, $($(#[$meta:meta])* $test:ident),*,) => { - paste::item! { - mod [<integration_region_failover_ $service:lower _test>] { - $( - #[tokio::test(flavor = "multi_thread")] - $( - #[$meta] - )* - async fn [< $test >]() { - let store_type = tests_integration::test_util::StorageType::$service; - if store_type.test_on() { - let _ = $crate::region_failover::$test(store_type).await; - } - - } - )* - } - } - }; -} - -#[macro_export] -macro_rules! region_failover_tests { - ($($service:ident),*) => { - $( - region_failover_test!( - $service, - - test_region_failover, - ); - )* - }; -} - -pub async fn test_region_failover(store_type: StorageType) { - if store_type == StorageType::File { - // Region failover doesn't make sense when using local file storage. 
- return; - } - common_telemetry::init_default_ut_logging(); - info!("Running region failover test for {}", store_type); - - let mut logical_timer = 1685508715000; - - let cluster_name = "test_region_failover"; - - let (store_config, _guard) = get_test_store_config(&store_type); - - let datanodes = 5u64; - let builder = GreptimeDbClusterBuilder::new(cluster_name).await; - let cluster = builder - .with_datanodes(datanodes as u32) - .with_store_config(store_config) - .build() - .await; - - let frontend = cluster.frontend.clone(); - - let table_id = prepare_testing_table(&cluster).await; - - let results = insert_values(&frontend, logical_timer).await; - logical_timer += 1000; - for result in results { - assert!(matches!(result.unwrap().data, OutputData::AffectedRows(1))); - } - - assert!(has_route_cache(&frontend, table_id).await); - - let distribution = find_region_distribution(&cluster, table_id).await; - info!("Find region distribution: {distribution:?}"); - - let mut foreign = 0; - for dn in 1..=datanodes { - if !&distribution.contains_key(&dn) { - foreign = dn - } - } - - let selector = Arc::new(ForeignNodeSelector { - foreign: Peer { - id: foreign, - // "127.0.0.1:3001" is just a placeholder, does not actually connect to it. - addr: "127.0.0.1:3001".to_string(), - }, - }); - - let failed_region = choose_failed_region(distribution); - info!("Simulating failed region: {failed_region:#?}"); - - run_region_failover_procedure(&cluster, failed_region.clone(), selector).await; - - let distribution = find_region_distribution(&cluster, table_id).await; - info!("Find region distribution again: {distribution:?}"); - - // Waits for invalidating table cache - time::sleep(Duration::from_millis(100)).await; - - assert!(!has_route_cache(&frontend, table_id).await); - - // Inserts data to each datanode after failover - let frontend = cluster.frontend.clone(); - let results = insert_values(&frontend, logical_timer).await; - for result in results { - assert!(matches!(result.unwrap().data, OutputData::AffectedRows(1))); - } - - assert_values(&frontend).await; - - assert!(!distribution.contains_key(&failed_region.datanode_id)); - - let mut success = false; - let values = distribution.values(); - for val in values { - success = success || val.contains(&failed_region.region_number); - } - assert!(success) -} - -async fn has_route_cache(instance: &Arc<Instance>, table_id: TableId) -> bool { - let catalog_manager = instance - .catalog_manager() - .as_any() - .downcast_ref::<KvBackendCatalogManager>() - .unwrap(); - - let kv_backend = catalog_manager.table_metadata_manager_ref().kv_backend(); - - let cache = kv_backend - .as_any() - .downcast_ref::<CachedMetaKvBackend>() - .unwrap() - .cache(); - - cache - .get(TableRouteKey::new(table_id).to_bytes().as_slice()) - .await - .is_some() -} - -async fn insert_values(instance: &Arc<Instance>, ts: u64) -> Vec<FrontendResult<Output>> { - let query_ctx = QueryContext::arc(); - - let mut results = Vec::new(); - for range in [5, 15, 25, 55] { - let result = insert_value( - instance, - &format!("INSERT INTO my_table VALUES ({},{})", range, ts), - query_ctx.clone(), - ) - .await; - results.push(result); - } - - results -} - -async fn insert_value( - instance: &Arc<Instance>, - sql: &str, - query_ctx: QueryContextRef, -) -> FrontendResult<Output> { - instance.do_query(sql, query_ctx).await.remove(0) -} - -async fn assert_values(instance: &Arc<Instance>) { - let query_ctx = QueryContext::arc(); - - let result = instance - .do_query("select * from my_table order by i, ts", 
query_ctx) - .await - .remove(0); - - let expected = "\ -+----+---------------------+ -| i | ts | -+----+---------------------+ -| 5 | 2023-05-31T04:51:55 | -| 5 | 2023-05-31T04:51:56 | -| 15 | 2023-05-31T04:51:55 | -| 15 | 2023-05-31T04:51:56 | -| 25 | 2023-05-31T04:51:55 | -| 25 | 2023-05-31T04:51:56 | -| 55 | 2023-05-31T04:51:55 | -| 55 | 2023-05-31T04:51:56 | -+----+---------------------+"; - check_output_stream(result.unwrap().data, expected).await; -} - -async fn prepare_testing_table(cluster: &GreptimeDbCluster) -> TableId { - let sql = r" -CREATE TABLE my_table ( - i INT PRIMARY KEY, - ts TIMESTAMP TIME INDEX, -) PARTITION BY RANGE COLUMNS (i) ( - PARTITION r0 VALUES LESS THAN (10), - PARTITION r1 VALUES LESS THAN (20), - PARTITION r2 VALUES LESS THAN (50), - PARTITION r3 VALUES LESS THAN (MAXVALUE), -)"; - let result = cluster.frontend.do_query(sql, QueryContext::arc()).await; - result.first().unwrap().as_ref().unwrap(); - - let table = cluster - .frontend - .catalog_manager() - .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table") - .await - .unwrap() - .unwrap(); - table.table_info().table_id() -} - -async fn find_region_distribution( - cluster: &GreptimeDbCluster, - table_id: TableId, -) -> RegionDistribution { - let manager = cluster.metasrv.table_metadata_manager(); - let region_distribution = manager - .table_route_manager() - .get_region_distribution(table_id) - .await - .unwrap() - .unwrap(); - - // test DatanodeTableValues match the table region distribution - for datanode_id in cluster.datanode_instances.keys() { - let mut actual = manager - .datanode_table_manager() - .tables(*datanode_id) - .try_collect::<Vec<_>>() - .await - .unwrap() - .into_iter() - .filter_map(|x| { - if x.table_id == table_id { - Some(x.regions) - } else { - None - } - }) - .flatten() - .collect::<Vec<_>>(); - actual.sort(); - - if let Some(mut expected) = region_distribution.get(datanode_id).cloned() { - expected.sort(); - assert_eq!(expected, actual); - } else { - assert!(actual.is_empty()); - } - } - - region_distribution -} - -fn choose_failed_region(distribution: RegionDistribution) -> RegionIdent { - let (failed_datanode, failed_region) = distribution - .iter() - .filter_map(|(datanode_id, regions)| { - if !regions.is_empty() { - Some((*datanode_id, regions[0])) - } else { - None - } - }) - .next() - .unwrap(); - RegionIdent { - cluster_id: 1000, - datanode_id: failed_datanode, - table_id: 1025, - region_number: failed_region, - engine: "mito2".to_string(), - } -} - -// The "foreign" means the Datanode is not containing any regions to the table before. 
-pub struct ForeignNodeSelector { - pub foreign: Peer, -} - -#[async_trait::async_trait] -impl Selector for ForeignNodeSelector { - type Context = SelectorContext; - type Output = Vec<Peer>; - - async fn select( - &self, - _ns: Namespace, - _ctx: &Self::Context, - _opts: SelectorOptions, - ) -> MetaResult<Self::Output> { - Ok(vec![self.foreign.clone()]) - } -} - -async fn run_region_failover_procedure( - cluster: &GreptimeDbCluster, - failed_region: RegionIdent, - selector: SelectorRef, -) { - let metasrv = &cluster.metasrv; - let procedure_manager = metasrv.procedure_manager(); - let procedure = RegionFailoverProcedure::new( - "greptime".into(), - "public".into(), - failed_region.clone(), - RegionFailoverContext { - region_lease_secs: 10, - in_memory: metasrv.in_memory().clone(), - kv_backend: metasrv.kv_backend().clone(), - mailbox: metasrv.mailbox().clone(), - selector, - selector_ctx: SelectorContext { - datanode_lease_secs: distributed_time_constants::REGION_LEASE_SECS, - flownode_lease_secs: distributed_time_constants::REGION_LEASE_SECS, - server_addr: metasrv.options().server_addr.clone(), - kv_backend: metasrv.kv_backend().clone(), - meta_peer_client: metasrv.meta_peer_client().clone(), - table_id: None, - }, - dist_lock: metasrv.lock().clone(), - table_metadata_manager: metasrv.table_metadata_manager().clone(), - }, - ); - let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); - let procedure_id = procedure_with_id.id; - info!("Starting region failover procedure {procedure_id} for region {failed_region:?}"); - - let watcher = &mut procedure_manager.submit(procedure_with_id).await.unwrap(); - watcher::wait(watcher).await.unwrap(); -}
type: chore
masked_commit_message: remove original region failover implementation (#4237)
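Editor's note: the failover procedure removed in the diff above is organized as a chain of serializable states, where each step implements an async `State` trait and is annotated with `#[typetag::serde]` so a boxed state can be persisted and resumed. The following is a minimal, hedged sketch of that general pattern (assuming the async-trait, typetag, and serde crates); the `Step`, `Start`, and `Done` names are illustrative stand-ins, not the meta-srv API.

```rust
// Sketch of the serializable async state-machine pattern visible in the removed
// code. `Step`, `Start`, and `Done` are illustrative names, not meta-srv types.
use async_trait::async_trait;
use serde::{Deserialize, Serialize};

#[async_trait]
#[typetag::serde]
trait Step: Send + Sync + std::fmt::Debug {
    // Each state does its work and returns the next state to run.
    async fn next(&mut self) -> Box<dyn Step>;
}

#[derive(Debug, Serialize, Deserialize)]
struct Start;

#[derive(Debug, Serialize, Deserialize)]
struct Done;

#[async_trait]
#[typetag::serde]
impl Step for Start {
    async fn next(&mut self) -> Box<dyn Step> {
        Box::new(Done)
    }
}

#[async_trait]
#[typetag::serde]
impl Step for Done {
    async fn next(&mut self) -> Box<dyn Step> {
        Box::new(Done)
    }
}
```

Because `typetag` registers each concrete state, a `Box<dyn Step>` can round-trip through serde, which is what lets such a procedure be checkpointed and resumed across restarts.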
hash: 91e933517a3b25e3421afcf00f0e140ffc9f91d0
date: 2024-11-27 07:49:24
author: Zhenchi
commit_message: chore: bump version of main branch to v0.11.0 (#5057)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock index 0495d1d35b2f..5d6750643035 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -208,7 +208,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c" [[package]] name = "api" -version = "0.10.1" +version = "0.11.0" dependencies = [ "common-base", "common-decimal", @@ -769,7 +769,7 @@ dependencies = [ [[package]] name = "auth" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -1379,7 +1379,7 @@ dependencies = [ [[package]] name = "cache" -version = "0.10.1" +version = "0.11.0" dependencies = [ "catalog", "common-error", @@ -1387,7 +1387,7 @@ dependencies = [ "common-meta", "moka", "snafu 0.8.5", - "substrait 0.10.1", + "substrait 0.11.0", ] [[package]] @@ -1414,7 +1414,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "catalog" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arrow", @@ -1753,7 +1753,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "client" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arc-swap", @@ -1782,7 +1782,7 @@ dependencies = [ "rand", "serde_json", "snafu 0.8.5", - "substrait 0.10.1", + "substrait 0.11.0", "substrait 0.37.3", "tokio", "tokio-stream", @@ -1823,7 +1823,7 @@ dependencies = [ [[package]] name = "cmd" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "auth", @@ -1882,7 +1882,7 @@ dependencies = [ "similar-asserts", "snafu 0.8.5", "store-api", - "substrait 0.10.1", + "substrait 0.11.0", "table", "temp-env", "tempfile", @@ -1928,7 +1928,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335" [[package]] name = "common-base" -version = "0.10.1" +version = "0.11.0" dependencies = [ "anymap2", "async-trait", @@ -1949,7 +1949,7 @@ dependencies = [ [[package]] name = "common-catalog" -version = "0.10.1" +version = "0.11.0" dependencies = [ "chrono", "common-error", @@ -1960,7 +1960,7 @@ dependencies = [ [[package]] name = "common-config" -version = "0.10.1" +version = "0.11.0" dependencies = [ "common-base", "common-error", @@ -1983,7 +1983,7 @@ dependencies = [ [[package]] name = "common-datasource" -version = "0.10.1" +version = "0.11.0" dependencies = [ "arrow", "arrow-schema", @@ -2020,7 +2020,7 @@ dependencies = [ [[package]] name = "common-decimal" -version = "0.10.1" +version = "0.11.0" dependencies = [ "bigdecimal 0.4.5", "common-error", @@ -2033,7 +2033,7 @@ dependencies = [ [[package]] name = "common-error" -version = "0.10.1" +version = "0.11.0" dependencies = [ "snafu 0.8.5", "strum 0.25.0", @@ -2042,7 +2042,7 @@ dependencies = [ [[package]] name = "common-frontend" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -2057,7 +2057,7 @@ dependencies = [ [[package]] name = "common-function" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "approx 0.5.1", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "common-greptimedb-telemetry" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "common-runtime", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "common-grpc" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arrow-flight", @@ -2145,7 +2145,7 @@ dependencies = [ [[package]] name = "common-grpc-expr" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "common-base", @@ -2164,7 +2164,7 @@ dependencies = [ [[package]] name = "common-macro" -version = "0.10.1" +version = 
"0.11.0" dependencies = [ "arc-swap", "common-query", @@ -2178,7 +2178,7 @@ dependencies = [ [[package]] name = "common-mem-prof" -version = "0.10.1" +version = "0.11.0" dependencies = [ "common-error", "common-macro", @@ -2191,7 +2191,7 @@ dependencies = [ [[package]] name = "common-meta" -version = "0.10.1" +version = "0.11.0" dependencies = [ "anymap2", "api", @@ -2248,7 +2248,7 @@ dependencies = [ [[package]] name = "common-options" -version = "0.10.1" +version = "0.11.0" dependencies = [ "common-grpc", "humantime-serde", @@ -2257,11 +2257,11 @@ dependencies = [ [[package]] name = "common-plugins" -version = "0.10.1" +version = "0.11.0" [[package]] name = "common-pprof" -version = "0.10.1" +version = "0.11.0" dependencies = [ "common-error", "common-macro", @@ -2273,7 +2273,7 @@ dependencies = [ [[package]] name = "common-procedure" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-stream", "async-trait", @@ -2300,7 +2300,7 @@ dependencies = [ [[package]] name = "common-procedure-test" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "common-procedure", @@ -2308,7 +2308,7 @@ dependencies = [ [[package]] name = "common-query" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -2334,7 +2334,7 @@ dependencies = [ [[package]] name = "common-recordbatch" -version = "0.10.1" +version = "0.11.0" dependencies = [ "arc-swap", "common-error", @@ -2353,7 +2353,7 @@ dependencies = [ [[package]] name = "common-runtime" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "clap 4.5.19", @@ -2383,7 +2383,7 @@ dependencies = [ [[package]] name = "common-telemetry" -version = "0.10.1" +version = "0.11.0" dependencies = [ "atty", "backtrace", @@ -2411,7 +2411,7 @@ dependencies = [ [[package]] name = "common-test-util" -version = "0.10.1" +version = "0.11.0" dependencies = [ "client", "common-query", @@ -2423,7 +2423,7 @@ dependencies = [ [[package]] name = "common-time" -version = "0.10.1" +version = "0.11.0" dependencies = [ "arrow", "chrono", @@ -2439,7 +2439,7 @@ dependencies = [ [[package]] name = "common-version" -version = "0.10.1" +version = "0.11.0" dependencies = [ "build-data", "const_format", @@ -2450,7 +2450,7 @@ dependencies = [ [[package]] name = "common-wal" -version = "0.10.1" +version = "0.11.0" dependencies = [ "common-base", "common-error", @@ -3259,7 +3259,7 @@ dependencies = [ [[package]] name = "datanode" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arrow-flight", @@ -3309,7 +3309,7 @@ dependencies = [ "session", "snafu 0.8.5", "store-api", - "substrait 0.10.1", + "substrait 0.11.0", "table", "tokio", "toml 0.8.19", @@ -3318,7 +3318,7 @@ dependencies = [ [[package]] name = "datatypes" -version = "0.10.1" +version = "0.11.0" dependencies = [ "arrow", "arrow-array", @@ -3936,7 +3936,7 @@ dependencies = [ [[package]] name = "file-engine" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -4053,7 +4053,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8" [[package]] name = "flow" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arrow", @@ -4110,7 +4110,7 @@ dependencies = [ "snafu 0.8.5", "store-api", "strum 0.25.0", - "substrait 0.10.1", + "substrait 0.11.0", "table", "tokio", "tonic 0.11.0", @@ -4172,7 +4172,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frontend" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arc-swap", @@ 
-5312,7 +5312,7 @@ dependencies = [ [[package]] name = "index" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "asynchronous-codec", @@ -6156,7 +6156,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "log-store" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-stream", "async-trait", @@ -6486,7 +6486,7 @@ dependencies = [ [[package]] name = "meta-client" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -6513,7 +6513,7 @@ dependencies = [ [[package]] name = "meta-srv" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -6592,7 +6592,7 @@ dependencies = [ [[package]] name = "metric-engine" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "aquamarine", @@ -6695,7 +6695,7 @@ dependencies = [ [[package]] name = "mito2" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "aquamarine", @@ -7459,7 +7459,7 @@ dependencies = [ [[package]] name = "object-store" -version = "0.10.1" +version = "0.11.0" dependencies = [ "anyhow", "bytes", @@ -7750,7 +7750,7 @@ dependencies = [ [[package]] name = "operator" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-stream", @@ -7797,7 +7797,7 @@ dependencies = [ "sql", "sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)", "store-api", - "substrait 0.10.1", + "substrait 0.11.0", "table", "tokio", "tokio-util", @@ -8047,7 +8047,7 @@ dependencies = [ [[package]] name = "partition" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -8348,7 +8348,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pipeline" -version = "0.10.1" +version = "0.11.0" dependencies = [ "ahash 0.8.11", "api", @@ -8511,7 +8511,7 @@ dependencies = [ [[package]] name = "plugins" -version = "0.10.1" +version = "0.11.0" dependencies = [ "auth", "common-base", @@ -8785,7 +8785,7 @@ dependencies = [ [[package]] name = "promql" -version = "0.10.1" +version = "0.11.0" dependencies = [ "ahash 0.8.11", "async-trait", @@ -9023,7 +9023,7 @@ dependencies = [ [[package]] name = "puffin" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-compression 0.4.13", "async-trait", @@ -9147,7 +9147,7 @@ dependencies = [ [[package]] name = "query" -version = "0.10.1" +version = "0.11.0" dependencies = [ "ahash 0.8.11", "api", @@ -9214,7 +9214,7 @@ dependencies = [ "stats-cli", "store-api", "streaming-stats", - "substrait 0.10.1", + "substrait 0.11.0", "table", "tokio", "tokio-stream", @@ -10677,7 +10677,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "script" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arc-swap", @@ -10971,7 +10971,7 @@ dependencies = [ [[package]] name = "servers" -version = "0.10.1" +version = "0.11.0" dependencies = [ "ahash 0.8.11", "aide", @@ -11086,7 +11086,7 @@ dependencies = [ [[package]] name = "session" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arc-swap", @@ -11432,7 +11432,7 @@ dependencies = [ [[package]] name = "sql" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "chrono", @@ -11495,7 +11495,7 @@ dependencies = [ [[package]] name = "sqlness-runner" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "clap 4.5.19", @@ -11715,7 +11715,7 @@ dependencies = [ [[package]] name = "store-api" -version = 
"0.10.1" +version = "0.11.0" dependencies = [ "api", "aquamarine", @@ -11886,7 +11886,7 @@ dependencies = [ [[package]] name = "substrait" -version = "0.10.1" +version = "0.11.0" dependencies = [ "async-trait", "bytes", @@ -12085,7 +12085,7 @@ dependencies = [ [[package]] name = "table" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "async-trait", @@ -12351,7 +12351,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "tests-fuzz" -version = "0.10.1" +version = "0.11.0" dependencies = [ "arbitrary", "async-trait", @@ -12393,7 +12393,7 @@ dependencies = [ [[package]] name = "tests-integration" -version = "0.10.1" +version = "0.11.0" dependencies = [ "api", "arrow-flight", @@ -12457,7 +12457,7 @@ dependencies = [ "sql", "sqlx", "store-api", - "substrait 0.10.1", + "substrait 0.11.0", "table", "tempfile", "time", diff --git a/Cargo.toml b/Cargo.toml index eefc21841ba9..9f3a4ff59e76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,7 +66,7 @@ members = [ resolver = "2" [workspace.package] -version = "0.10.1" +version = "0.11.0" edition = "2021" license = "Apache-2.0"
type: chore
masked_commit_message: bump version of main branch to v0.11.0 (#5057)
hash: 332b3677ac2bc69cc44697a69e10416da8250e58
date: 2023-05-26 08:27:27
author: Ning Sun
commit_message: feat: add metrics for ingested row count (#1645)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock index cd315d7998d6..df06ede090ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3118,6 +3118,7 @@ dependencies = [ "meta-srv", "meter-core", "meter-macros", + "metrics", "mito", "moka 0.9.7", "object-store", diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml index 64351240d32c..71828fbec530 100644 --- a/src/frontend/Cargo.toml +++ b/src/frontend/Cargo.toml @@ -41,6 +41,7 @@ itertools = "0.10" meta-client = { path = "../meta-client" } meter-core.workspace = true meter-macros.workspace = true +metrics.workspace = true mito = { path = "../mito", features = ["test"] } moka = { version = "0.9", features = ["future"] } object-store = { path = "../object-store" } diff --git a/src/frontend/src/metrics.rs b/src/frontend/src/metrics.rs index 43c694d425b2..508699e84088 100644 --- a/src/frontend/src/metrics.rs +++ b/src/frontend/src/metrics.rs @@ -21,3 +21,4 @@ pub(crate) const METRIC_RUN_SCRIPT_ELAPSED: &str = "frontend.run_script_elapsed" pub const DIST_CREATE_TABLE: &str = "frontend.dist.create_table"; pub const DIST_CREATE_TABLE_IN_META: &str = "frontend.dist.create_table.update_meta"; pub const DIST_CREATE_TABLE_IN_DATANODE: &str = "frontend.dist.create_table.invoke_datanode"; +pub const DIST_INGEST_ROW_COUNT: &str = "frontend.dist.ingest_rows"; diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs index 18f73766a905..3edce8c00464 100644 --- a/src/frontend/src/table/insert.rs +++ b/src/frontend/src/table/insert.rs @@ -20,6 +20,7 @@ use api::v1::{Column, InsertRequest as GrpcInsertRequest}; use common_query::Output; use datatypes::prelude::{ConcreteDataType, VectorRef}; use futures::future; +use metrics::counter; use snafu::{ensure, ResultExt}; use store_api::storage::RegionNumber; use table::requests::InsertRequest; @@ -47,6 +48,7 @@ impl DistTable { .context(JoinTaskSnafu)?; let affected_rows = results.into_iter().sum::<Result<u32>>()?; + counter!(crate::metrics::DIST_INGEST_ROW_COUNT, affected_rows as u64); Ok(Output::AffectedRows(affected_rows as _)) } }
type: feat
masked_commit_message: add metrics for ingested row count (#1645)
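Editor's note: the diff above counts ingested rows by emitting a counter from the distributed insert path. A minimal sketch of the same pattern follows, mirroring the `counter!(name, value)` macro form that appears in the diff (the exact form depends on the `metrics` crate version); the metric name and function here are illustrative, not the frontend's actual constants.

```rust
// Minimal sketch: bump a counter by the number of affected rows, mirroring the
// `counter!` call added in the diff. The metric name below is illustrative only.
use metrics::counter;

pub const INGEST_ROW_COUNT: &str = "example.dist.ingest_rows";

fn record_ingested_rows(affected_rows: u32) {
    // Older `metrics` macro form: a name expression plus the delta to add.
    counter!(INGEST_ROW_COUNT, affected_rows as u64);
}
```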
hash: 3b27adb3fe4cde38448aac3fc20c37ca46681234
date: 2025-01-03 08:43:04
author: Ning Sun
commit_message: ci: update nix setup (#5272)
is_merge: false
git_diff:
diff --git a/shell.nix b/shell.nix index ce84a032764d..e052ed6bea18 100644 --- a/shell.nix +++ b/shell.nix @@ -1,5 +1,5 @@ let - nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable"; + nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-24.11"; fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {}; pkgs = import nixpkgs { config = {}; overlays = []; }; in @@ -17,10 +17,12 @@ pkgs.mkShell rec { }) cargo-nextest taplo + curl ]; buildInputs = with pkgs; [ libgit2 + libz ]; LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
type: ci
masked_commit_message: update nix setup (#5272)
hash: 06dcd0f6edb4c9e9ce822d618601568167f2cc53
date: 2024-03-11 20:21:06
author: Yingwen
commit_message: fix: freeze data buffer in shard (#3468)
is_merge: false
git_diff:
diff --git a/src/mito2/src/memtable/merge_tree.rs b/src/mito2/src/memtable/merge_tree.rs index 5f80ba746a5f..1789959adfee 100644 --- a/src/mito2/src/memtable/merge_tree.rs +++ b/src/mito2/src/memtable/merge_tree.rs @@ -85,7 +85,7 @@ impl Default for MergeTreeConfig { Self { index_max_keys_per_shard: 8192, - data_freeze_threshold: 32768, + data_freeze_threshold: 131072, dedup: true, fork_dictionary_bytes, } diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs index e43d06a22361..3f6557ee03b2 100644 --- a/src/mito2/src/memtable/merge_tree/data.rs +++ b/src/mito2/src/memtable/merge_tree/data.rs @@ -957,6 +957,18 @@ impl DataParts { self.active.write_row(pk_index, kv) } + /// Returns the number of rows in the active buffer. + pub fn num_active_rows(&self) -> usize { + self.active.num_rows() + } + + /// Freezes active buffer and creates a new active buffer. + pub fn freeze(&mut self) -> Result<()> { + let part = self.active.freeze(None, false)?; + self.frozen.push(part); + Ok(()) + } + /// Reads data from all parts including active and frozen parts. /// The returned iterator yields a record batch of one primary key at a time. /// The order of yielding primary keys is determined by provided weights. @@ -976,6 +988,11 @@ impl DataParts { pub(crate) fn is_empty(&self) -> bool { self.active.is_empty() && self.frozen.iter().all(|part| part.is_empty()) } + + #[cfg(test)] + pub(crate) fn frozen_len(&self) -> usize { + self.frozen.len() + } } pub struct DataPartsReaderBuilder { diff --git a/src/mito2/src/memtable/merge_tree/partition.rs b/src/mito2/src/memtable/merge_tree/partition.rs index d4bd0644b582..f031de57eb66 100644 --- a/src/mito2/src/memtable/merge_tree/partition.rs +++ b/src/mito2/src/memtable/merge_tree/partition.rs @@ -78,7 +78,7 @@ impl Partition { // Finds key in shards, now we ensure one key only exists in one shard. if let Some(pk_id) = inner.find_key_in_shards(primary_key) { - inner.write_to_shard(pk_id, &key_value); + inner.write_to_shard(pk_id, &key_value)?; inner.num_rows += 1; return Ok(()); } @@ -106,7 +106,7 @@ impl Partition { } /// Writes to the partition without a primary key. - pub fn write_no_key(&self, key_value: KeyValue) { + pub fn write_no_key(&self, key_value: KeyValue) -> Result<()> { let mut inner = self.inner.write().unwrap(); // If no primary key, always write to the first shard. debug_assert!(!inner.shards.is_empty()); @@ -117,12 +117,15 @@ impl Partition { shard_id: 0, pk_index: 0, }; - inner.shards[0].write_with_pk_id(pk_id, &key_value); + inner.shards[0].write_with_pk_id(pk_id, &key_value)?; inner.num_rows += 1; + + Ok(()) } /// Scans data in the partition. pub fn read(&self, mut context: ReadPartitionContext) -> Result<PartitionReader> { + let start = Instant::now(); let key_filter = if context.need_prune_key { Some(PrimaryKeyFilter::new( context.metadata.clone(), @@ -150,7 +153,7 @@ impl Partition { (builder_reader, shard_source) }; - context.metrics.num_shards = shard_reader_builders.len(); + context.metrics.num_shards += shard_reader_builders.len(); let mut nodes = shard_reader_builders .into_iter() .map(|builder| { @@ -161,7 +164,7 @@ impl Partition { .collect::<Result<Vec<_>>>()?; if let Some(builder) = builder_source { - context.metrics.read_builder = true; + context.metrics.num_builder += 1; // Move the initialization of ShardBuilderReader out of read lock. 
let shard_builder_reader = builder.build(Some(&context.pk_weights), key_filter.clone())?; @@ -172,8 +175,10 @@ impl Partition { let merger = ShardMerger::try_new(nodes)?; if self.dedup { let source = DedupReader::try_new(merger)?; + context.metrics.build_partition_reader += start.elapsed(); PartitionReader::new(context, Box::new(source)) } else { + context.metrics.build_partition_reader += start.elapsed(); PartitionReader::new(context, Box::new(merger)) } } @@ -282,9 +287,10 @@ pub(crate) struct PartitionStats { #[derive(Default)] struct PartitionReaderMetrics { + build_partition_reader: Duration, read_source: Duration, data_batch_to_batch: Duration, - read_builder: bool, + num_builder: usize, num_shards: usize, } @@ -440,9 +446,15 @@ impl Drop for ReadPartitionContext { .observe(partition_data_batch_to_batch); common_telemetry::debug!( - "TreeIter partitions metrics, read_builder: {}, num_shards: {}, partition_read_source: {}s, partition_data_batch_to_batch: {}s", - self.metrics.read_builder, + "TreeIter partitions metrics, \ + num_builder: {}, \ + num_shards: {}, \ + build_partition_reader: {}s, \ + partition_read_source: {}s, \ + partition_data_batch_to_batch: {}s", + self.metrics.num_builder, self.metrics.num_shards, + self.metrics.build_partition_reader.as_secs_f64(), partition_read_source, partition_data_batch_to_batch, ); @@ -549,7 +561,16 @@ impl Inner { fn new(metadata: RegionMetadataRef, config: &MergeTreeConfig) -> Self { let (shards, current_shard_id) = if metadata.primary_key.is_empty() { let data_parts = DataParts::new(metadata.clone(), DATA_INIT_CAP, config.dedup); - (vec![Shard::new(0, None, data_parts, config.dedup)], 1) + ( + vec![Shard::new( + 0, + None, + data_parts, + config.dedup, + config.data_freeze_threshold, + )], + 1, + ) } else { (Vec::new(), 0) }; @@ -569,18 +590,22 @@ impl Inner { self.pk_to_pk_id.get(primary_key).copied() } - fn write_to_shard(&mut self, pk_id: PkId, key_value: &KeyValue) { + fn write_to_shard(&mut self, pk_id: PkId, key_value: &KeyValue) -> Result<()> { if pk_id.shard_id == self.shard_builder.current_shard_id() { self.shard_builder.write_with_pk_id(pk_id, key_value); - return; - } - for shard in &mut self.shards { - if shard.shard_id == pk_id.shard_id { - shard.write_with_pk_id(pk_id, key_value); - self.num_rows += 1; - return; - } + return Ok(()); } + + // Safety: We find the shard by shard id. + let shard = self + .shards + .iter_mut() + .find(|shard| shard.shard_id == pk_id.shard_id) + .unwrap(); + shard.write_with_pk_id(pk_id, key_value)?; + self.num_rows += 1; + + Ok(()) } fn freeze_active_shard(&mut self) -> Result<()> { diff --git a/src/mito2/src/memtable/merge_tree/shard.rs b/src/mito2/src/memtable/merge_tree/shard.rs index 7f981f91623c..2ac1ee90bc4a 100644 --- a/src/mito2/src/memtable/merge_tree/shard.rs +++ b/src/mito2/src/memtable/merge_tree/shard.rs @@ -39,6 +39,8 @@ pub struct Shard { /// Data in the shard. data_parts: DataParts, dedup: bool, + /// Number of rows to freeze a data part. + data_freeze_threshold: usize, } impl Shard { @@ -48,20 +50,29 @@ impl Shard { key_dict: Option<KeyDictRef>, data_parts: DataParts, dedup: bool, + data_freeze_threshold: usize, ) -> Shard { Shard { shard_id, key_dict, data_parts, dedup, + data_freeze_threshold, } } /// Writes a key value into the shard. - pub fn write_with_pk_id(&mut self, pk_id: PkId, key_value: &KeyValue) { + /// + /// It will freezes the active buffer if it is full. 
+ pub fn write_with_pk_id(&mut self, pk_id: PkId, key_value: &KeyValue) -> Result<()> { debug_assert_eq!(self.shard_id, pk_id.shard_id); + if self.data_parts.num_active_rows() >= self.data_freeze_threshold { + self.data_parts.freeze()?; + } + self.data_parts.write_row(pk_id.pk_index, key_value); + Ok(()) } /// Scans the shard. @@ -83,6 +94,7 @@ impl Shard { key_dict: self.key_dict.clone(), data_parts: DataParts::new(metadata, DATA_INIT_CAP, self.dedup), dedup: self.dedup, + data_freeze_threshold: self.data_freeze_threshold, } } @@ -467,6 +479,7 @@ mod tests { shard_id: ShardId, metadata: RegionMetadataRef, input: &[(KeyValues, PkIndex)], + data_freeze_threshold: usize, ) -> Shard { let mut dict_builder = KeyDictBuilder::new(1024); let mut metrics = WriteMetrics::default(); @@ -481,14 +494,34 @@ mod tests { let dict = dict_builder.finish(&mut BTreeMap::new()).unwrap(); let data_parts = DataParts::new(metadata, DATA_INIT_CAP, true); - Shard::new(shard_id, Some(Arc::new(dict)), data_parts, true) + Shard::new( + shard_id, + Some(Arc::new(dict)), + data_parts, + true, + data_freeze_threshold, + ) + } + + fn collect_timestamps(shard: &Shard) -> Vec<i64> { + let mut reader = shard.read().unwrap().build(None).unwrap(); + let mut timestamps = Vec::new(); + while reader.is_valid() { + let rb = reader.current_data_batch().slice_record_batch(); + let ts_array = rb.column(1); + let ts_slice = timestamp_array_to_i64_slice(ts_array); + timestamps.extend_from_slice(ts_slice); + + reader.next().unwrap(); + } + timestamps } #[test] fn test_write_read_shard() { let metadata = metadata_for_test(); let input = input_with_key(&metadata); - let mut shard = new_shard_with_dict(8, metadata, &input); + let mut shard = new_shard_with_dict(8, metadata, &input, 100); assert!(shard.is_empty()); for (key_values, pk_index) in &input { for kv in key_values.iter() { @@ -496,21 +529,49 @@ mod tests { shard_id: shard.shard_id, pk_index: *pk_index, }; - shard.write_with_pk_id(pk_id, &kv); + shard.write_with_pk_id(pk_id, &kv).unwrap(); } } assert!(!shard.is_empty()); - let mut reader = shard.read().unwrap().build(None).unwrap(); - let mut timestamps = Vec::new(); - while reader.is_valid() { - let rb = reader.current_data_batch().slice_record_batch(); - let ts_array = rb.column(1); - let ts_slice = timestamp_array_to_i64_slice(ts_array); - timestamps.extend_from_slice(ts_slice); + let timestamps = collect_timestamps(&shard); + assert_eq!(vec![0, 1, 10, 11, 20, 21], timestamps); + } - reader.next().unwrap(); + #[test] + fn test_shard_freeze() { + let metadata = metadata_for_test(); + let kvs = build_key_values_with_ts_seq_values( + &metadata, + "shard".to_string(), + 0, + [0].into_iter(), + [Some(0.0)].into_iter(), + 0, + ); + let mut shard = new_shard_with_dict(8, metadata.clone(), &[(kvs, 0)], 50); + let expected: Vec<_> = (0..200).collect(); + for i in &expected { + let kvs = build_key_values_with_ts_seq_values( + &metadata, + "shard".to_string(), + 0, + [*i].into_iter(), + [Some(0.0)].into_iter(), + *i as u64, + ); + let pk_id = PkId { + shard_id: shard.shard_id, + pk_index: *i as PkIndex, + }; + for kv in kvs.iter() { + shard.write_with_pk_id(pk_id, &kv).unwrap(); + } } - assert_eq!(vec![0, 1, 10, 11, 20, 21], timestamps); + assert!(!shard.is_empty()); + assert_eq!(3, shard.data_parts.frozen_len()); + + let timestamps = collect_timestamps(&shard); + assert_eq!(expected, timestamps); } } diff --git a/src/mito2/src/memtable/merge_tree/shard_builder.rs b/src/mito2/src/memtable/merge_tree/shard_builder.rs index 
2b007ebd87a1..01cb2de25a3f 100644 --- a/src/mito2/src/memtable/merge_tree/shard_builder.rs +++ b/src/mito2/src/memtable/merge_tree/shard_builder.rs @@ -138,7 +138,13 @@ impl ShardBuilder { let shard_id = self.current_shard_id; self.current_shard_id += 1; - Ok(Some(Shard::new(shard_id, key_dict, data_parts, self.dedup))) + Ok(Some(Shard::new( + shard_id, + key_dict, + data_parts, + self.dedup, + self.data_freeze_threshold, + ))) } /// Scans the shard builder. diff --git a/src/mito2/src/memtable/merge_tree/tree.rs b/src/mito2/src/memtable/merge_tree/tree.rs index 94c87f7583b3..0a42e13fdec3 100644 --- a/src/mito2/src/memtable/merge_tree/tree.rs +++ b/src/mito2/src/memtable/merge_tree/tree.rs @@ -124,7 +124,7 @@ impl MergeTree { if !has_pk { // No primary key. - self.write_no_key(kv); + self.write_no_key(kv)?; continue; } @@ -299,7 +299,7 @@ impl MergeTree { ) } - fn write_no_key(&self, key_value: KeyValue) { + fn write_no_key(&self, key_value: KeyValue) -> Result<()> { let partition_key = Partition::get_partition_key(&key_value, self.is_partitioned); let partition = self.get_or_create_partition(partition_key); diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index 32feebe1613b..a75e9e219612 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -789,7 +789,7 @@ intermediate_path = "" [datanode.region_engine.mito.memtable] type = "experimental" index_max_keys_per_shard = 8192 -data_freeze_threshold = 32768 +data_freeze_threshold = 131072 dedup = true fork_dictionary_bytes = "1GiB"
type: fix
masked_commit_message: freeze data buffer in shard (#3468)
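Editor's note: the fix above freezes the active data buffer once it reaches `data_freeze_threshold` rows, pushing it into the frozen list before further writes continue. Below is a simplified, self-contained sketch of that freeze-on-threshold pattern; the `Parts` type and its fields are stand-ins, not the mito2 memtable types.

```rust
// Simplified sketch of the freeze-on-threshold pattern from this fix. `Parts`
// and its fields are illustrative stand-ins for the mito2 data structures.
struct Parts<T> {
    active: Vec<T>,
    frozen: Vec<Vec<T>>,
    freeze_threshold: usize,
}

impl<T> Parts<T> {
    fn new(freeze_threshold: usize) -> Self {
        Self {
            active: Vec::new(),
            frozen: Vec::new(),
            freeze_threshold,
        }
    }

    fn write(&mut self, row: T) {
        // Freeze the active buffer before writing when it is already full,
        // mirroring the check added to `Shard::write_with_pk_id` in the diff.
        if self.active.len() >= self.freeze_threshold {
            let full = std::mem::take(&mut self.active);
            self.frozen.push(full);
        }
        self.active.push(row);
    }

    fn frozen_len(&self) -> usize {
        self.frozen.len()
    }
}
```

With a threshold of 50 and 200 writes, this sketch ends with three frozen parts of 50 rows plus a full active buffer, matching the `frozen_len()` of 3 asserted by the commit's `test_shard_freeze`.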
hash: a680133acc39fc83e0a6d62f929ea615a95e882e
date: 2023-10-08 11:49:52
author: Weny Xu
commit_message: feat: enable no delay for mysql, opentsdb, http (#2530)
is_merge: false
git_diff:
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs index 0a8dd839fa27..1e9660e93872 100644 --- a/src/servers/src/http.rs +++ b/src/servers/src/http.rs @@ -728,7 +728,9 @@ impl Server for HttpServer { app = configurator.config_http(app); } let app = self.build(app); - let server = axum::Server::bind(&listening).serve(app.into_make_service()); + let server = axum::Server::bind(&listening) + .tcp_nodelay(true) + .serve(app.into_make_service()); *shutdown_tx = Some(tx); diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs index 65af7e28696a..0dd9af79dd30 100644 --- a/src/servers/src/mysql/server.rs +++ b/src/servers/src/mysql/server.rs @@ -19,6 +19,7 @@ use std::sync::Arc; use async_trait::async_trait; use auth::UserProviderRef; use common_runtime::Runtime; +use common_telemetry::error; use common_telemetry::logging::{info, warn}; use futures::StreamExt; use metrics::{decrement_gauge, increment_gauge}; @@ -137,6 +138,9 @@ impl MysqlServer { match tcp_stream { Err(error) => warn!("Broken pipe: {}", error), // IoError doesn't impl ErrorExt. Ok(io_stream) => { + if let Err(e) = io_stream.set_nodelay(true) { + error!(e; "Failed to set TCP nodelay"); + } if let Err(error) = Self::handle(io_stream, io_runtime, spawn_ref, spawn_config).await { diff --git a/src/servers/src/opentsdb.rs b/src/servers/src/opentsdb.rs index 7c569cc2ffed..61ed84167064 100644 --- a/src/servers/src/opentsdb.rs +++ b/src/servers/src/opentsdb.rs @@ -81,6 +81,9 @@ impl OpentsdbServer { async move { match stream { Ok(stream) => { + if let Err(e) = stream.set_nodelay(true) { + error!(e; "Failed to set TCP nodelay"); + } let connection = Connection::new(stream); let mut handler = Handler::new(query_handler, connection, shutdown);
feat
enable no delay for mysql, opentsdb, http (#2530)
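The record above enables TCP_NODELAY on every accepted MySQL and OpenTSDB connection and on the axum HTTP listener. A minimal sketch of the same idea for a bare tokio accept loop, assuming a tokio runtime and a placeholder handler (the function and variable names here are illustrative, not GreptimeDB code):

use tokio::net::TcpListener;

async fn accept_loop(listener: TcpListener) -> std::io::Result<()> {
    loop {
        let (stream, _peer) = listener.accept().await?;
        // Disable Nagle's algorithm so small protocol frames are flushed
        // immediately instead of being batched with later writes.
        if let Err(e) = stream.set_nodelay(true) {
            eprintln!("Failed to set TCP nodelay: {e}");
        }
        // hand `stream` off to the protocol handler here
    }
}

The trade-off is more, smaller packets in exchange for lower per-request latency, which suits interactive query protocols.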
68dd2916fb2e3133f2cb90bf1350bf040f316b78
2024-12-24 11:51:19
Ruihang Xia
feat: logs query endpoint (#5202)
false
diff --git a/Cargo.lock b/Cargo.lock index c23acf60636d..8387a80663ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4152,6 +4152,7 @@ dependencies = [ "futures", "humantime-serde", "lazy_static", + "log-query", "log-store", "meta-client", "opentelemetry-proto 0.5.0", @@ -6122,6 +6123,7 @@ dependencies = [ "chrono", "common-error", "common-macro", + "serde", "snafu 0.8.5", "table", ] @@ -8160,7 +8162,7 @@ dependencies = [ "rand", "ring 0.17.8", "rust_decimal", - "thiserror 2.0.4", + "thiserror 2.0.6", "tokio", "tokio-rustls 0.26.0", "tokio-util", @@ -9098,6 +9100,7 @@ dependencies = [ "humantime", "itertools 0.10.5", "lazy_static", + "log-query", "meter-core", "meter-macros", "num", @@ -10952,6 +10955,7 @@ dependencies = [ "json5", "jsonb", "lazy_static", + "log-query", "loki-api", "mime_guess", "mysql_async", @@ -12434,11 +12438,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.4" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" dependencies = [ - "thiserror-impl 2.0.4", + "thiserror-impl 2.0.6", ] [[package]] @@ -12454,9 +12458,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.4" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 990bc71a907b..9729d5796d74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -238,6 +238,7 @@ file-engine = { path = "src/file-engine" } flow = { path = "src/flow" } frontend = { path = "src/frontend", default-features = false } index = { path = "src/index" } +log-query = { path = "src/log-query" } log-store = { path = "src/log-store" } meta-client = { path = "src/meta-client" } meta-srv = { path = "src/meta-srv" } diff --git a/src/auth/src/permission.rs b/src/auth/src/permission.rs index cdd370c5cda8..6c33a766a6ae 100644 --- a/src/auth/src/permission.rs +++ b/src/auth/src/permission.rs @@ -25,6 +25,7 @@ pub enum PermissionReq<'a> { GrpcRequest(&'a Request), SqlStatement(&'a Statement), PromQuery, + LogQuery, Opentsdb, LineProtocol, PromStoreWrite, diff --git a/src/common/function/src/lib.rs b/src/common/function/src/lib.rs index 4a6a6844d548..1c718634dcee 100644 --- a/src/common/function/src/lib.rs +++ b/src/common/function/src/lib.rs @@ -26,3 +26,4 @@ pub mod function_registry; pub mod handlers; pub mod helper; pub mod state; +pub mod utils; diff --git a/src/common/function/src/scalars/matches.rs b/src/common/function/src/scalars/matches.rs index d3276f83a2a0..1bd9e8e1b5f1 100644 --- a/src/common/function/src/scalars/matches.rs +++ b/src/common/function/src/scalars/matches.rs @@ -204,20 +204,10 @@ impl PatternAst { fn convert_literal(column: &str, pattern: &str) -> Expr { logical_expr::col(column).like(logical_expr::lit(format!( "%{}%", - Self::escape_pattern(pattern) + crate::utils::escape_like_pattern(pattern) ))) } - fn escape_pattern(pattern: &str) -> String { - pattern - .chars() - .flat_map(|c| match c { - '\\' | '%' | '_' => vec!['\\', c], - _ => vec![c], - }) - .collect::<String>() - } - /// Transform this AST with preset rules to make it correct. 
fn transform_ast(self) -> Result<Self> { self.transform_up(Self::collapse_binary_branch_fn) diff --git a/src/common/function/src/utils.rs b/src/common/function/src/utils.rs new file mode 100644 index 000000000000..f2c18d5f6c77 --- /dev/null +++ b/src/common/function/src/utils.rs @@ -0,0 +1,58 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Escapes special characters in the provided pattern string for `LIKE`. +/// +/// Specifically, it prefixes the backslash (`\`), percent (`%`), and underscore (`_`) +/// characters with an additional backslash to ensure they are treated literally. +/// +/// # Examples +/// +/// ```rust +/// let escaped = escape_pattern("100%_some\\path"); +/// assert_eq!(escaped, "100\\%\\_some\\\\path"); +/// ``` +pub fn escape_like_pattern(pattern: &str) -> String { + pattern + .chars() + .flat_map(|c| match c { + '\\' | '%' | '_' => vec!['\\', c], + _ => vec![c], + }) + .collect::<String>() +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_escape_like_pattern() { + assert_eq!( + escape_like_pattern("100%_some\\path"), + "100\\%\\_some\\\\path" + ); + assert_eq!(escape_like_pattern(""), ""); + assert_eq!(escape_like_pattern("hello"), "hello"); + assert_eq!(escape_like_pattern("\\%_"), "\\\\\\%\\_"); + assert_eq!(escape_like_pattern("%%__\\\\"), "\\%\\%\\_\\_\\\\\\\\"); + assert_eq!(escape_like_pattern("abc123"), "abc123"); + assert_eq!(escape_like_pattern("%_\\"), "\\%\\_\\\\"); + assert_eq!( + escape_like_pattern("%%__\\\\another%string"), + "\\%\\%\\_\\_\\\\\\\\another\\%string" + ); + assert_eq!(escape_like_pattern("foo%bar_"), "foo\\%bar\\_"); + assert_eq!(escape_like_pattern("\\_\\%"), "\\\\\\_\\\\\\%"); + } +} diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml index e21819c568f2..5542c19b54e9 100644 --- a/src/frontend/Cargo.toml +++ b/src/frontend/Cargo.toml @@ -41,6 +41,7 @@ datafusion-expr.workspace = true datanode.workspace = true humantime-serde.workspace = true lazy_static.workspace = true +log-query.workspace = true log-store.workspace = true meta-client.workspace = true opentelemetry-proto.workspace = true diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs index b22bde96e0ff..c304eece4206 100644 --- a/src/frontend/src/instance.rs +++ b/src/frontend/src/instance.rs @@ -16,6 +16,7 @@ pub mod builder; mod grpc; mod influxdb; mod log_handler; +mod logs; mod opentsdb; mod otlp; mod prom_store; @@ -64,8 +65,8 @@ use servers::prometheus_handler::PrometheusHandler; use servers::query_handler::grpc::GrpcQueryHandler; use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::{ - InfluxdbLineProtocolHandler, OpenTelemetryProtocolHandler, OpentsdbProtocolHandler, - PipelineHandler, PromStoreProtocolHandler, ScriptHandler, + InfluxdbLineProtocolHandler, LogQueryHandler, OpenTelemetryProtocolHandler, + OpentsdbProtocolHandler, PipelineHandler, PromStoreProtocolHandler, ScriptHandler, }; use servers::server::ServerHandlers; use 
session::context::QueryContextRef; @@ -99,6 +100,7 @@ pub trait FrontendInstance: + ScriptHandler + PrometheusHandler + PipelineHandler + + LogQueryHandler + Send + Sync + 'static diff --git a/src/frontend/src/instance/logs.rs b/src/frontend/src/instance/logs.rs new file mode 100644 index 000000000000..f10ea168ff10 --- /dev/null +++ b/src/frontend/src/instance/logs.rs @@ -0,0 +1,67 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq}; +use client::Output; +use common_error::ext::BoxedError; +use log_query::LogQuery; +use server_error::Result as ServerResult; +use servers::error::{self as server_error, AuthSnafu, ExecuteQuerySnafu}; +use servers::interceptor::{LogQueryInterceptor, LogQueryInterceptorRef}; +use servers::query_handler::LogQueryHandler; +use session::context::QueryContextRef; +use snafu::ResultExt; +use tonic::async_trait; + +use super::Instance; + +#[async_trait] +impl LogQueryHandler for Instance { + async fn query(&self, mut request: LogQuery, ctx: QueryContextRef) -> ServerResult<Output> { + let interceptor = self + .plugins + .get::<LogQueryInterceptorRef<server_error::Error>>(); + + self.plugins + .get::<PermissionCheckerRef>() + .as_ref() + .check_permission(ctx.current_user(), PermissionReq::LogQuery) + .context(AuthSnafu)?; + + interceptor.as_ref().pre_query(&request, ctx.clone())?; + + request + .time_filter + .canonicalize() + .map_err(BoxedError::new) + .context(ExecuteQuerySnafu)?; + + let plan = self + .query_engine + .planner() + .plan_logs_query(request, ctx.clone()) + .await + .map_err(BoxedError::new) + .context(ExecuteQuerySnafu)?; + + let output = self + .statement_executor + .exec_plan(plan, ctx.clone()) + .await + .map_err(BoxedError::new) + .context(ExecuteQuerySnafu)?; + + Ok(interceptor.as_ref().post_query(output, ctx.clone())?) 
+ } +} diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs index 115002c3aba9..ccc98593cadb 100644 --- a/src/frontend/src/server.rs +++ b/src/frontend/src/server.rs @@ -87,6 +87,7 @@ where let ingest_interceptor = self.plugins.get::<LogIngestInterceptorRef<ServerError>>(); builder = builder.with_log_ingest_handler(self.instance.clone(), validator, ingest_interceptor); + builder = builder.with_logs_handler(self.instance.clone()); if let Some(user_provider) = self.plugins.get::<UserProviderRef>() { builder = builder.with_user_provider(user_provider); diff --git a/src/log-query/Cargo.toml b/src/log-query/Cargo.toml index 9e503470149f..5e8db345b00e 100644 --- a/src/log-query/Cargo.toml +++ b/src/log-query/Cargo.toml @@ -11,5 +11,6 @@ workspace = true chrono.workspace = true common-error.workspace = true common-macro.workspace = true +serde.workspace = true snafu.workspace = true table.workspace = true diff --git a/src/log-query/src/error.rs b/src/log-query/src/error.rs index d8ec39a936eb..26554e478163 100644 --- a/src/log-query/src/error.rs +++ b/src/log-query/src/error.rs @@ -15,6 +15,7 @@ use std::any::Any; use common_error::ext::ErrorExt; +use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; use snafu::Snafu; @@ -41,6 +42,15 @@ impl ErrorExt for Error { fn as_any(&self) -> &dyn Any { self } + + fn status_code(&self) -> StatusCode { + match self { + Error::InvalidTimeFilter { .. } + | Error::InvalidDateFormat { .. } + | Error::InvalidSpanFormat { .. } + | Error::EndBeforeStart { .. } => StatusCode::InvalidArguments, + } + } } pub type Result<T> = std::result::Result<T, Error>; diff --git a/src/log-query/src/log_query.rs b/src/log-query/src/log_query.rs index c8719b125905..24ad3e622042 100644 --- a/src/log-query/src/log_query.rs +++ b/src/log-query/src/log_query.rs @@ -13,6 +13,7 @@ // limitations under the License. use chrono::{DateTime, Datelike, Duration, NaiveDate, NaiveTime, TimeZone, Utc}; +use serde::{Deserialize, Serialize}; use table::table_name::TableName; use crate::error::{ @@ -21,9 +22,10 @@ use crate::error::{ }; /// GreptimeDB's log query request. +#[derive(Debug, Serialize, Deserialize)] pub struct LogQuery { /// A fully qualified table name to query logs from. - pub table_name: TableName, + pub table: TableName, /// Specifies the time range for the log query. See [`TimeFilter`] for more details. pub time_filter: TimeFilter, /// Columns with filters to query. @@ -34,6 +36,18 @@ pub struct LogQuery { pub context: Context, } +impl Default for LogQuery { + fn default() -> Self { + Self { + table: TableName::new("", "", ""), + time_filter: Default::default(), + columns: vec![], + limit: None, + context: Default::default(), + } + } +} + /// Represents a time range for log query. /// /// This struct allows various formats to express a time range from the user side @@ -58,7 +72,7 @@ pub struct LogQuery { /// /// This struct doesn't require a timezone to be presented. When the timezone is not /// provided, it will fill the default timezone with the same rules akin to other queries. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct TimeFilter { pub start: Option<String>, pub end: Option<String>, @@ -69,8 +83,7 @@ impl TimeFilter { /// Validate and canonicalize the time filter. 
/// /// This function will try to fill the missing fields and convert all dates to timestamps - // false positive - #[allow(unused_assignments)] + #[allow(unused_assignments)] // false positive pub fn canonicalize(&mut self) -> Result<()> { let mut start_dt = None; let mut end_dt = None; @@ -209,6 +222,7 @@ impl TimeFilter { } /// Represents a column with filters to query. +#[derive(Debug, Serialize, Deserialize)] pub struct ColumnFilters { /// Case-sensitive column name to query. pub column_name: String, @@ -216,6 +230,7 @@ pub struct ColumnFilters { pub filters: Vec<ContentFilter>, } +#[derive(Debug, Serialize, Deserialize)] pub enum ContentFilter { /// Only match the exact content. /// @@ -234,13 +249,16 @@ pub enum ContentFilter { Compound(Vec<ContentFilter>, BinaryOperator), } +#[derive(Debug, Serialize, Deserialize)] pub enum BinaryOperator { And, Or, } /// Controls how many adjacent lines to return. +#[derive(Debug, Default, Serialize, Deserialize)] pub enum Context { + #[default] None, /// Specify the number of lines before and after the matched line separately. Lines(usize, usize), diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml index 8139ea3aafbb..130037fec562 100644 --- a/src/query/Cargo.toml +++ b/src/query/Cargo.toml @@ -46,6 +46,7 @@ greptime-proto.workspace = true humantime.workspace = true itertools.workspace = true lazy_static.workspace = true +log-query.workspace = true meter-core.workspace = true meter-macros.workspace = true object-store.workspace = true diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs index 435e9b4bcc9e..6e1fbfae0af8 100644 --- a/src/query/src/lib.rs +++ b/src/query/src/lib.rs @@ -17,6 +17,7 @@ #![feature(trait_upcasting)] #![feature(try_blocks)] #![feature(stmt_expr_attributes)] +#![feature(iterator_try_collect)] mod analyze; pub mod dataframe; @@ -25,6 +26,7 @@ pub mod dist_plan; pub mod dummy_catalog; pub mod error; pub mod executor; +pub mod log_query; pub mod metrics; mod optimizer; pub mod parser; diff --git a/src/query/src/log_query.rs b/src/query/src/log_query.rs new file mode 100644 index 000000000000..b44053b579d6 --- /dev/null +++ b/src/query/src/log_query.rs @@ -0,0 +1,16 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod error; +pub mod planner; diff --git a/src/query/src/log_query/error.rs b/src/query/src/log_query/error.rs new file mode 100644 index 000000000000..9045d30b6805 --- /dev/null +++ b/src/query/src/log_query/error.rs @@ -0,0 +1,84 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::any::Any; + +use common_error::ext::ErrorExt; +use common_error::status_code::StatusCode; +use common_macro::stack_trace_debug; +use datafusion::error::DataFusionError; +use snafu::{Location, Snafu}; + +#[derive(Snafu)] +#[snafu(visibility(pub))] +#[stack_trace_debug] +pub enum Error { + #[snafu(display("General catalog error"))] + Catalog { + #[snafu(implicit)] + location: Location, + source: catalog::error::Error, + }, + + #[snafu(display("Internal error during building DataFusion plan"))] + DataFusionPlanning { + #[snafu(source)] + error: datafusion::error::DataFusionError, + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Unknown table type, downcast failed"))] + UnknownTable { + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Cannot find time index column"))] + TimeIndexNotFound { + #[snafu(implicit)] + location: Location, + }, + + #[snafu(display("Unimplemented feature: {}", feature))] + Unimplemented { + #[snafu(implicit)] + location: Location, + feature: String, + }, +} + +impl ErrorExt for Error { + fn status_code(&self) -> StatusCode { + use Error::*; + match self { + Catalog { source, .. } => source.status_code(), + DataFusionPlanning { .. } => StatusCode::External, + UnknownTable { .. } | TimeIndexNotFound { .. } => StatusCode::Internal, + Unimplemented { .. } => StatusCode::Unsupported, + } + } + + fn as_any(&self) -> &dyn Any { + self + } +} + +pub type Result<T> = std::result::Result<T, Error>; + +impl From<Error> for DataFusionError { + fn from(err: Error) -> Self { + DataFusionError::External(Box::new(err)) + } +} diff --git a/src/query/src/log_query/planner.rs b/src/query/src/log_query/planner.rs new file mode 100644 index 000000000000..b5d4e385fbcb --- /dev/null +++ b/src/query/src/log_query/planner.rs @@ -0,0 +1,371 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use catalog::table_source::DfTableSourceProvider; +use common_function::utils::escape_like_pattern; +use datafusion::datasource::DefaultTableSource; +use datafusion_common::ScalarValue; +use datafusion_expr::utils::conjunction; +use datafusion_expr::{col, lit, Expr, LogicalPlan, LogicalPlanBuilder}; +use datafusion_sql::TableReference; +use datatypes::schema::Schema; +use log_query::{ColumnFilters, LogQuery, TimeFilter}; +use snafu::{OptionExt, ResultExt}; +use table::table::adapter::DfTableProviderAdapter; + +use crate::log_query::error::{ + CatalogSnafu, DataFusionPlanningSnafu, Result, TimeIndexNotFoundSnafu, UnimplementedSnafu, + UnknownTableSnafu, +}; + +const DEFAULT_LIMIT: usize = 1000; + +pub struct LogQueryPlanner { + table_provider: DfTableSourceProvider, +} + +impl LogQueryPlanner { + pub fn new(table_provider: DfTableSourceProvider) -> Self { + Self { table_provider } + } + + pub async fn query_to_plan(&mut self, query: LogQuery) -> Result<LogicalPlan> { + // Resolve table + let table_ref: TableReference = query.table.table_ref().into(); + let table_source = self + .table_provider + .resolve_table(table_ref.clone()) + .await + .context(CatalogSnafu)?; + let schema = table_source + .as_any() + .downcast_ref::<DefaultTableSource>() + .context(UnknownTableSnafu)? + .table_provider + .as_any() + .downcast_ref::<DfTableProviderAdapter>() + .context(UnknownTableSnafu)? + .table() + .schema(); + + // Build the initial scan plan + let mut plan_builder = LogicalPlanBuilder::scan(table_ref, table_source, None) + .context(DataFusionPlanningSnafu)?; + + // Collect filter expressions + let mut filters = Vec::new(); + + // Time filter + filters.push(self.build_time_filter(&query.time_filter, &schema)?); + + // Column filters and projections + let mut projected_columns = Vec::new(); + for column_filter in &query.columns { + if let Some(expr) = self.build_column_filter(column_filter)? { + filters.push(expr); + } + projected_columns.push(col(&column_filter.column_name)); + } + + // Apply filters + if !filters.is_empty() { + let filter_expr = filters.into_iter().reduce(|a, b| a.and(b)).unwrap(); + plan_builder = plan_builder + .filter(filter_expr) + .context(DataFusionPlanningSnafu)?; + } + + // Apply projections + plan_builder = plan_builder + .project(projected_columns) + .context(DataFusionPlanningSnafu)?; + + // Apply limit + plan_builder = plan_builder + .limit(0, query.limit.or(Some(DEFAULT_LIMIT))) + .context(DataFusionPlanningSnafu)?; + + // Build the final plan + let plan = plan_builder.build().context(DataFusionPlanningSnafu)?; + + Ok(plan) + } + + fn build_time_filter(&self, time_filter: &TimeFilter, schema: &Schema) -> Result<Expr> { + let timestamp_col = schema + .timestamp_column() + .with_context(|| TimeIndexNotFoundSnafu {})? 
+ .name + .clone(); + + let start_time = ScalarValue::Utf8(time_filter.start.clone()); + let end_time = ScalarValue::Utf8( + time_filter + .end + .clone() + .or(Some("9999-12-31T23:59:59Z".to_string())), + ); + let expr = col(timestamp_col.clone()) + .gt_eq(lit(start_time)) + .and(col(timestamp_col).lt_eq(lit(end_time))); + + Ok(expr) + } + + /// Returns filter expressions + fn build_column_filter(&self, column_filter: &ColumnFilters) -> Result<Option<Expr>> { + if column_filter.filters.is_empty() { + return Ok(None); + } + + let exprs = column_filter + .filters + .iter() + .map(|filter| match filter { + log_query::ContentFilter::Exact(pattern) => Ok(col(&column_filter.column_name) + .like(lit(ScalarValue::Utf8(Some(escape_like_pattern(pattern)))))), + log_query::ContentFilter::Prefix(pattern) => Ok(col(&column_filter.column_name) + .like(lit(ScalarValue::Utf8(Some(format!( + "{}%", + escape_like_pattern(pattern) + )))))), + log_query::ContentFilter::Postfix(pattern) => Ok(col(&column_filter.column_name) + .like(lit(ScalarValue::Utf8(Some(format!( + "%{}", + escape_like_pattern(pattern) + )))))), + log_query::ContentFilter::Contains(pattern) => Ok(col(&column_filter.column_name) + .like(lit(ScalarValue::Utf8(Some(format!( + "%{}%", + escape_like_pattern(pattern) + )))))), + log_query::ContentFilter::Regex(..) => Err::<Expr, _>( + UnimplementedSnafu { + feature: "regex filter", + } + .build(), + ), + log_query::ContentFilter::Compound(..) => Err::<Expr, _>( + UnimplementedSnafu { + feature: "compound filter", + } + .build(), + ), + }) + .try_collect::<Vec<_>>()?; + + Ok(conjunction(exprs)) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use catalog::memory::MemoryCatalogManager; + use catalog::RegisterTableRequest; + use common_catalog::consts::DEFAULT_CATALOG_NAME; + use common_query::test_util::DummyDecoder; + use datatypes::prelude::ConcreteDataType; + use datatypes::schema::{ColumnSchema, SchemaRef}; + use log_query::{ContentFilter, Context}; + use session::context::QueryContext; + use table::metadata::{TableInfoBuilder, TableMetaBuilder}; + use table::table_name::TableName; + use table::test_util::EmptyTable; + + use super::*; + + fn mock_schema() -> SchemaRef { + let columns = vec![ + ColumnSchema::new( + "message".to_string(), + ConcreteDataType::string_datatype(), + false, + ), + ColumnSchema::new( + "timestamp".to_string(), + ConcreteDataType::timestamp_millisecond_datatype(), + false, + ) + .with_time_index(true), + ColumnSchema::new( + "host".to_string(), + ConcreteDataType::string_datatype(), + true, + ), + ]; + + Arc::new(Schema::new(columns)) + } + + /// Registers table under `greptime`, with `message` and `timestamp` and `host` columns. 
+ async fn build_test_table_provider( + table_name_tuples: &[(String, String)], + ) -> DfTableSourceProvider { + let catalog_list = MemoryCatalogManager::with_default_setup(); + for (schema_name, table_name) in table_name_tuples { + let schema = mock_schema(); + let table_meta = TableMetaBuilder::default() + .schema(schema) + .primary_key_indices(vec![2]) + .value_indices(vec![0]) + .next_column_id(1024) + .build() + .unwrap(); + let table_info = TableInfoBuilder::default() + .name(table_name.to_string()) + .meta(table_meta) + .build() + .unwrap(); + let table = EmptyTable::from_table_info(&table_info); + + catalog_list + .register_table_sync(RegisterTableRequest { + catalog: DEFAULT_CATALOG_NAME.to_string(), + schema: schema_name.to_string(), + table_name: table_name.to_string(), + table_id: 1024, + table, + }) + .unwrap(); + } + + DfTableSourceProvider::new( + catalog_list, + false, + QueryContext::arc(), + DummyDecoder::arc(), + false, + ) + } + + #[tokio::test] + async fn test_query_to_plan() { + let table_provider = + build_test_table_provider(&[("public".to_string(), "test_table".to_string())]).await; + let mut planner = LogQueryPlanner::new(table_provider); + + let log_query = LogQuery { + table: TableName::new(DEFAULT_CATALOG_NAME, "public", "test_table"), + time_filter: TimeFilter { + start: Some("2021-01-01T00:00:00Z".to_string()), + end: Some("2021-01-02T00:00:00Z".to_string()), + span: None, + }, + columns: vec![ColumnFilters { + column_name: "message".to_string(), + filters: vec![ContentFilter::Contains("error".to_string())], + }], + limit: Some(100), + context: Context::None, + }; + + let plan = planner.query_to_plan(log_query).await.unwrap(); + let expected = "Limit: skip=0, fetch=100 [message:Utf8]\ +\n Projection: greptime.public.test_table.message [message:Utf8]\ +\n Filter: greptime.public.test_table.timestamp >= Utf8(\"2021-01-01T00:00:00Z\") AND greptime.public.test_table.timestamp <= Utf8(\"2021-01-02T00:00:00Z\") AND greptime.public.test_table.message LIKE Utf8(\"%error%\") [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]\ +\n TableScan: greptime.public.test_table [message:Utf8, timestamp:Timestamp(Millisecond, None), host:Utf8;N]"; + + assert_eq!(plan.display_indent_schema().to_string(), expected); + } + + #[tokio::test] + async fn test_build_time_filter() { + let table_provider = + build_test_table_provider(&[("public".to_string(), "test_table".to_string())]).await; + let planner = LogQueryPlanner::new(table_provider); + + let time_filter = TimeFilter { + start: Some("2021-01-01T00:00:00Z".to_string()), + end: Some("2021-01-02T00:00:00Z".to_string()), + span: None, + }; + + let expr = planner + .build_time_filter(&time_filter, &mock_schema()) + .unwrap(); + + let expected_expr = col("timestamp") + .gt_eq(lit(ScalarValue::Utf8(Some( + "2021-01-01T00:00:00Z".to_string(), + )))) + .and(col("timestamp").lt_eq(lit(ScalarValue::Utf8(Some( + "2021-01-02T00:00:00Z".to_string(), + ))))); + + assert_eq!(format!("{:?}", expr), format!("{:?}", expected_expr)); + } + + #[tokio::test] + async fn test_build_time_filter_without_end() { + let table_provider = + build_test_table_provider(&[("public".to_string(), "test_table".to_string())]).await; + let planner = LogQueryPlanner::new(table_provider); + + let time_filter = TimeFilter { + start: Some("2021-01-01T00:00:00Z".to_string()), + end: None, + span: None, + }; + + let expr = planner + .build_time_filter(&time_filter, &mock_schema()) + .unwrap(); + + let expected_expr = col("timestamp") + 
.gt_eq(lit(ScalarValue::Utf8(Some( + "2021-01-01T00:00:00Z".to_string(), + )))) + .and(col("timestamp").lt_eq(lit(ScalarValue::Utf8(Some( + "9999-12-31T23:59:59Z".to_string(), + ))))); + + assert_eq!(format!("{:?}", expr), format!("{:?}", expected_expr)); + } + + #[tokio::test] + async fn test_build_column_filter() { + let table_provider = + build_test_table_provider(&[("public".to_string(), "test_table".to_string())]).await; + let planner = LogQueryPlanner::new(table_provider); + + let column_filter = ColumnFilters { + column_name: "message".to_string(), + filters: vec![ + ContentFilter::Contains("error".to_string()), + ContentFilter::Prefix("WARN".to_string()), + ], + }; + + let expr_option = planner.build_column_filter(&column_filter).unwrap(); + assert!(expr_option.is_some()); + + let expr = expr_option.unwrap(); + + let expected_expr = col("message") + .like(lit(ScalarValue::Utf8(Some("%error%".to_string())))) + .and(col("message").like(lit(ScalarValue::Utf8(Some("WARN%".to_string()))))); + + assert_eq!(format!("{:?}", expr), format!("{:?}", expected_expr)); + } + + #[test] + fn test_escape_pattern() { + assert_eq!(escape_like_pattern("test"), "test"); + assert_eq!(escape_like_pattern("te%st"), "te\\%st"); + assert_eq!(escape_like_pattern("te_st"), "te\\_st"); + assert_eq!(escape_like_pattern("te\\st"), "te\\\\st"); + } +} diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs index 29a0a364ea36..20377c67c034 100644 --- a/src/query/src/planner.rs +++ b/src/query/src/planner.rs @@ -24,6 +24,7 @@ use datafusion::execution::context::SessionState; use datafusion::sql::planner::PlannerContext; use datafusion_expr::{Expr as DfExpr, LogicalPlan}; use datafusion_sql::planner::{ParserOptions, SqlToRel}; +use log_query::LogQuery; use promql_parser::parser::EvalStmt; use session::context::QueryContextRef; use snafu::ResultExt; @@ -31,6 +32,7 @@ use sql::ast::Expr as SqlExpr; use sql::statements::statement::Statement; use crate::error::{DataFusionSnafu, PlanSqlSnafu, QueryPlanSnafu, Result, SqlSnafu}; +use crate::log_query::planner::LogQueryPlanner; use crate::parser::QueryStatement; use crate::promql::planner::PromPlanner; use crate::query_engine::{DefaultPlanDecoder, QueryEngineState}; @@ -41,6 +43,12 @@ use crate::{DfContextProviderAdapter, QueryEngineContext}; pub trait LogicalPlanner: Send + Sync { async fn plan(&self, stmt: &QueryStatement, query_ctx: QueryContextRef) -> Result<LogicalPlan>; + async fn plan_logs_query( + &self, + query: LogQuery, + query_ctx: QueryContextRef, + ) -> Result<LogicalPlan>; + fn optimize(&self, plan: LogicalPlan) -> Result<LogicalPlan>; fn as_any(&self) -> &dyn Any; @@ -182,6 +190,34 @@ impl LogicalPlanner for DfLogicalPlanner { } } + async fn plan_logs_query( + &self, + query: LogQuery, + query_ctx: QueryContextRef, + ) -> Result<LogicalPlan> { + let plan_decoder = Arc::new(DefaultPlanDecoder::new( + self.session_state.clone(), + &query_ctx, + )?); + let table_provider = DfTableSourceProvider::new( + self.engine_state.catalog_manager().clone(), + self.engine_state.disallow_cross_catalog_query(), + query_ctx, + plan_decoder, + self.session_state + .config_options() + .sql_parser + .enable_ident_normalization, + ); + + let mut planner = LogQueryPlanner::new(table_provider); + planner + .query_to_plan(query) + .await + .map_err(BoxedError::new) + .context(QueryPlanSnafu) + } + fn optimize(&self, plan: LogicalPlan) -> Result<LogicalPlan> { self.optimize_logical_plan(plan) } diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml index 
a90fb880e20d..74adaffd5ea8 100644 --- a/src/servers/Cargo.toml +++ b/src/servers/Cargo.toml @@ -66,6 +66,7 @@ itertools.workspace = true json5 = "0.4" jsonb.workspace = true lazy_static.workspace = true +log-query.workspace = true loki-api = "0.1" mime_guess = "2.0" notify.workspace = true diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs index 9841f02d6ead..dd618f24a3f7 100644 --- a/src/servers/src/http.rs +++ b/src/servers/src/http.rs @@ -66,8 +66,8 @@ use crate::metrics_handler::MetricsHandler; use crate::prometheus_handler::PrometheusHandlerRef; use crate::query_handler::sql::ServerSqlQueryHandlerRef; use crate::query_handler::{ - InfluxdbLineProtocolHandlerRef, OpenTelemetryProtocolHandlerRef, OpentsdbProtocolHandlerRef, - PipelineHandlerRef, PromStoreProtocolHandlerRef, ScriptHandlerRef, + InfluxdbLineProtocolHandlerRef, LogQueryHandlerRef, OpenTelemetryProtocolHandlerRef, + OpentsdbProtocolHandlerRef, PipelineHandlerRef, PromStoreProtocolHandlerRef, ScriptHandlerRef, }; use crate::server::Server; @@ -80,6 +80,7 @@ mod extractor; pub mod handler; pub mod header; pub mod influxdb; +pub mod logs; pub mod mem_prof; pub mod opentsdb; pub mod otlp; @@ -506,6 +507,17 @@ impl HttpServerBuilder { } } + pub fn with_logs_handler(self, logs_handler: LogQueryHandlerRef) -> Self { + let logs_router = HttpServer::route_logs(logs_handler); + + Self { + router: self + .router + .nest(&format!("/{HTTP_API_VERSION}"), logs_router), + ..self + } + } + pub fn with_opentsdb_handler(self, handler: OpentsdbProtocolHandlerRef) -> Self { Self { router: self.router.nest( @@ -770,6 +782,12 @@ impl HttpServer { .with_state(api_state) } + fn route_logs<S>(log_handler: LogQueryHandlerRef) -> Router<S> { + Router::new() + .route("/logs", routing::get(logs::logs).post(logs::logs)) + .with_state(log_handler) + } + /// Route Prometheus [HTTP API]. /// /// [HTTP API]: https://prometheus.io/docs/prometheus/latest/querying/api/ diff --git a/src/servers/src/http/logs.rs b/src/servers/src/http/logs.rs new file mode 100644 index 000000000000..0375865b31da --- /dev/null +++ b/src/servers/src/http/logs.rs @@ -0,0 +1,50 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; +use std::time::Instant; + +use axum::extract::State; +use axum::response::{IntoResponse, Response}; +use axum::{Extension, Json}; +use common_telemetry::tracing; +use log_query::LogQuery; +use session::context::{Channel, QueryContext}; + +use crate::http::result::greptime_result_v1::GreptimedbV1Response; +use crate::query_handler::LogQueryHandlerRef; + +#[axum_macros::debug_handler] +#[tracing::instrument(skip_all, fields(protocol = "http", request_type = "logs"))] +pub async fn logs( + State(handler): State<LogQueryHandlerRef>, + Extension(mut query_ctx): Extension<QueryContext>, + Json(params): Json<LogQuery>, +) -> Response { + let exec_start = Instant::now(); + let db = query_ctx.get_db_string(); + + query_ctx.set_channel(Channel::Http); + let query_ctx = Arc::new(query_ctx); + + let _timer = crate::metrics::METRIC_HTTP_LOGS_INGESTION_ELAPSED + .with_label_values(&[db.as_str()]) + .start_timer(); + + let output = handler.query(params, query_ctx).await; + let resp = GreptimedbV1Response::from_output(vec![output]).await; + + resp.with_execution_time(exec_start.elapsed().as_millis() as u64) + .into_response() +} diff --git a/src/servers/src/interceptor.rs b/src/servers/src/interceptor.rs index 241bbe1d0e66..a9f5a60765c9 100644 --- a/src/servers/src/interceptor.rs +++ b/src/servers/src/interceptor.rs @@ -22,6 +22,7 @@ use async_trait::async_trait; use common_error::ext::ErrorExt; use common_query::Output; use datafusion_expr::LogicalPlan; +use log_query::LogQuery; use query::parser::PromQuery; use serde_json::Value; use session::context::QueryContextRef; @@ -458,3 +459,54 @@ where } } } + +/// LogQueryInterceptor can track life cycle of a log query request +/// and customize or abort its execution at given point. +pub trait LogQueryInterceptor { + type Error: ErrorExt; + + /// Called before query is actually executed. + fn pre_query(&self, _query: &LogQuery, _query_ctx: QueryContextRef) -> Result<(), Self::Error> { + Ok(()) + } + + /// Called after execution finished. The implementation can modify the + /// output if needed. + fn post_query( + &self, + output: Output, + _query_ctx: QueryContextRef, + ) -> Result<Output, Self::Error> { + Ok(output) + } +} + +pub type LogQueryInterceptorRef<E> = + Arc<dyn LogQueryInterceptor<Error = E> + Send + Sync + 'static>; + +impl<E> LogQueryInterceptor for Option<&LogQueryInterceptorRef<E>> +where + E: ErrorExt, +{ + type Error = E; + + fn pre_query(&self, query: &LogQuery, query_ctx: QueryContextRef) -> Result<(), Self::Error> { + if let Some(this) = self { + this.pre_query(query, query_ctx) + } else { + Ok(()) + } + } + + fn post_query( + &self, + output: Output, + query_ctx: QueryContextRef, + ) -> Result<Output, Self::Error> { + if let Some(this) = self { + this.post_query(output, query_ctx) + } else { + Ok(output) + } + } +} diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs index 87ab38dc8215..fe81fed6ced5 100644 --- a/src/servers/src/metrics.rs +++ b/src/servers/src/metrics.rs @@ -72,6 +72,14 @@ lazy_static! { vec![0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 60.0, 300.0] ) .unwrap(); + /// Http logs query duration per database. 
+ pub static ref METRIC_HTTP_LOGS_ELAPSED: HistogramVec = register_histogram_vec!( + "greptime_servers_http_logs_elapsed", + "servers http logs elapsed", + &[METRIC_DB_LABEL], + vec![0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 60.0, 300.0] + ) + .unwrap(); pub static ref METRIC_AUTH_FAILURE: IntCounterVec = register_int_counter_vec!( "greptime_servers_auth_failure_count", "servers auth failure count", diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs index ff92d3c5d15b..171590d55e15 100644 --- a/src/servers/src/query_handler.rs +++ b/src/servers/src/query_handler.rs @@ -34,6 +34,7 @@ use api::v1::RowInsertRequests; use async_trait::async_trait; use common_query::Output; use headers::HeaderValue; +use log_query::LogQuery; use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest; use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest; use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; @@ -52,6 +53,7 @@ pub type PromStoreProtocolHandlerRef = Arc<dyn PromStoreProtocolHandler + Send + pub type OpenTelemetryProtocolHandlerRef = Arc<dyn OpenTelemetryProtocolHandler + Send + Sync>; pub type ScriptHandlerRef = Arc<dyn ScriptHandler + Send + Sync>; pub type PipelineHandlerRef = Arc<dyn PipelineHandler + Send + Sync>; +pub type LogQueryHandlerRef = Arc<dyn LogQueryHandler + Send + Sync>; #[async_trait] pub trait ScriptHandler { @@ -174,3 +176,9 @@ pub trait PipelineHandler { //// Build a pipeline from a string. fn build_pipeline(&self, pipeline: &str) -> Result<Pipeline<GreptimeTransformer>>; } + +/// Handle log query requests. +#[async_trait] +pub trait LogQueryHandler { + async fn query(&self, query: LogQuery, ctx: QueryContextRef) -> Result<Output>; +}
feat
logs query endpoint (#5202)
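For reference, the planner test in the diff above shows how a request for the new logs endpoint is assembled. A query equivalent to "rows of `greptime.public.test_table` from 2021-01-01 whose `message` contains \"error\", at most 100 rows" looks like this, using the types defined in `src/log-query`:

use log_query::{ColumnFilters, ContentFilter, Context, LogQuery, TimeFilter};
use table::table_name::TableName;

fn example_query() -> LogQuery {
    LogQuery {
        // Fully qualified catalog.schema.table to read logs from.
        table: TableName::new("greptime", "public", "test_table"),
        time_filter: TimeFilter {
            start: Some("2021-01-01T00:00:00Z".to_string()),
            end: Some("2021-01-02T00:00:00Z".to_string()),
            span: None,
        },
        columns: vec![ColumnFilters {
            column_name: "message".to_string(),
            filters: vec![ContentFilter::Contains("error".to_string())],
        }],
        limit: Some(100),
        context: Context::None,
    }
}

The planner turns this into a `TableScan` → `Filter` (time range plus `LIKE '%error%'`) → `Projection` → `Limit` plan, as the expected plan string in `test_query_to_plan` shows. The HTTP handler accepts the same structure as a JSON body; its exact wire shape follows the serde derives on these types.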
9e58311ecd2838761dbcfcccc85ae126b9422e88
2023-01-09 13:43:53
fys
feat: datanode support report number of regions to meta (#838)
false
diff --git a/src/api/greptime/v1/meta/heartbeat.proto b/src/api/greptime/v1/meta/heartbeat.proto index 6d37806a7a0d..91a8bcae554d 100644 --- a/src/api/greptime/v1/meta/heartbeat.proto +++ b/src/api/greptime/v1/meta/heartbeat.proto @@ -34,13 +34,13 @@ message HeartbeatRequest { message NodeStat { // The read capacity units during this period - uint64 rcus = 1; + int64 rcus = 1; // The write capacity units during this period - uint64 wcus = 2; + int64 wcus = 2; // How many tables on this node - uint64 table_num = 3; + int64 table_num = 3; // How many regions on this node - uint64 region_num = 4; + int64 region_num = 4; double cpu_usage = 5; double load = 6; @@ -57,13 +57,13 @@ message RegionStat { uint64 region_id = 1; TableName table_name = 2; // The read capacity units during this period - uint64 rcus = 3; + int64 rcus = 3; // The write capacity units during this period - uint64 wcus = 4; + int64 wcus = 4; // Approximate bytes of this region - uint64 approximate_bytes = 5; + int64 approximate_bytes = 5; // Approximate number of rows in this region - uint64 approximate_rows = 6; + int64 approximate_rows = 6; // Others map<string, string> attrs = 100; diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs index a7c53ffe9e77..5f1d2d89fb38 100644 --- a/src/catalog/src/lib.rs +++ b/src/catalog/src/lib.rs @@ -19,7 +19,7 @@ use std::fmt::{Debug, Formatter}; use std::sync::Arc; use common_telemetry::info; -use snafu::ResultExt; +use snafu::{OptionExt, ResultExt}; use table::engine::{EngineContext, TableEngineRef}; use table::metadata::TableId; use table::requests::CreateTableRequest; @@ -208,3 +208,38 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>( } Ok(()) } + +/// The number of regions in the datanode node. +pub fn region_number(catalog_manager: &CatalogManagerRef) -> Result<u64> { + let mut region_number: u64 = 0; + + for catalog_name in catalog_manager.catalog_names()? { + let catalog = + catalog_manager + .catalog(&catalog_name)? + .context(error::CatalogNotFoundSnafu { + catalog_name: &catalog_name, + })?; + + for schema_name in catalog.schema_names()? { + let schema = catalog + .schema(&schema_name)? + .context(error::SchemaNotFoundSnafu { + catalog: &catalog_name, + schema: &schema_name, + })?; + + for table_name in schema.table_names()? { + let table = schema + .table(&table_name)? 
+ .context(error::TableNotFoundSnafu { + table_info: &table_name, + })?; + + let region_numbers = &table.table_info().meta.region_numbers; + region_number += region_numbers.len() as u64; + } + } + } + Ok(region_number) +} diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs index bf17ef67ed43..c791b8e69e75 100644 --- a/src/datanode/src/heartbeat.rs +++ b/src/datanode/src/heartbeat.rs @@ -16,19 +16,20 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; -use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, Peer}; +use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer}; +use catalog::{region_number, CatalogManagerRef}; use common_telemetry::{error, info, warn}; use meta_client::client::{HeartbeatSender, MetaClient}; use snafu::ResultExt; use crate::error::{MetaClientInitSnafu, Result}; -#[derive(Debug, Clone, Default)] pub struct HeartbeatTask { node_id: u64, server_addr: String, running: Arc<AtomicBool>, meta_client: Arc<MetaClient>, + catalog_manager: CatalogManagerRef, interval: u64, } @@ -40,12 +41,18 @@ impl Drop for HeartbeatTask { impl HeartbeatTask { /// Create a new heartbeat task instance. - pub fn new(node_id: u64, server_addr: String, meta_client: Arc<MetaClient>) -> Self { + pub fn new( + node_id: u64, + server_addr: String, + meta_client: Arc<MetaClient>, + catalog_manager: CatalogManagerRef, + ) -> Self { Self { node_id, server_addr, running: Arc::new(AtomicBool::new(false)), meta_client, + catalog_manager, interval: 5_000, // default interval is set to 5 secs } } @@ -92,16 +99,30 @@ impl HeartbeatTask { let server_addr = self.server_addr.clone(); let meta_client = self.meta_client.clone(); + let catalog_manager_clone = self.catalog_manager.clone(); let mut tx = Self::create_streams(&meta_client, running.clone()).await?; common_runtime::spawn_bg(async move { while running.load(Ordering::Acquire) { + let region_num = match region_number(&catalog_manager_clone) { + Ok(region_num) => region_num as i64, + Err(e) => { + error!("failed to get region number, err: {e:?}"); + -1 + } + }; + let req = HeartbeatRequest { peer: Some(Peer { id: node_id, addr: server_addr.clone(), }), + node_stat: Some(NodeStat { + region_num, + ..Default::default() + }), ..Default::default() }; + if let Err(e) = tx.send(req).await { error!("Failed to send heartbeat to metasrv, error: {:?}", e); match Self::create_streams(&meta_client, running.clone()).await { diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs index 8a41520d3fff..c2a3802253fe 100644 --- a/src/datanode/src/instance.rs +++ b/src/datanode/src/instance.rs @@ -146,6 +146,7 @@ impl Instance { opts.node_id.context(MissingNodeIdSnafu)?, opts.rpc_addr.clone(), meta_client.as_ref().unwrap().clone(), + catalog_manager.clone(), )), }; Ok(Self { diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs index 211ef99ad9f3..053ab289fe7a 100644 --- a/src/datanode/src/mock.rs +++ b/src/datanode/src/mock.rs @@ -71,6 +71,7 @@ impl Instance { opts.node_id.unwrap_or(42), opts.rpc_addr.clone(), meta_client.clone(), + catalog_manager.clone(), ); Ok(Self { query_engine: query_engine.clone(), diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs index 8f98913ee09d..4b7b39e3ac61 100644 --- a/src/meta-srv/src/handler.rs +++ b/src/meta-srv/src/handler.rs @@ -22,7 +22,7 @@ mod check_leader_handler; mod collect_stats_handler; mod instruction; mod keep_lease_handler; -mod node_stat; +pub(crate) mod node_stat; mod persist_stats_handler; mod 
response_header_handler; diff --git a/src/meta-srv/src/handler/node_stat.rs b/src/meta-srv/src/handler/node_stat.rs index e530469ace25..9431b01a5371 100644 --- a/src/meta-srv/src/handler/node_stat.rs +++ b/src/meta-srv/src/handler/node_stat.rs @@ -14,8 +14,11 @@ use api::v1::meta::HeartbeatRequest; use common_time::util as time_util; +use serde::{Deserialize, Serialize}; -#[derive(Debug)] +use crate::keys::StatKey; + +#[derive(Debug, Default, Serialize, Deserialize)] pub struct Stat { pub timestamp_millis: i64, pub cluster_id: u64, @@ -24,13 +27,13 @@ pub struct Stat { /// Leader node pub is_leader: bool, /// The read capacity units during this period - pub rcus: u64, + pub rcus: i64, /// The write capacity units during this period - pub wcus: u64, + pub wcus: i64, /// How many tables on this node - pub table_num: u64, + pub table_num: i64, /// How many regions on this node - pub region_num: u64, + pub region_num: i64, pub cpu_usage: f64, pub load: f64, /// Read disk IO on this node @@ -41,20 +44,29 @@ pub struct Stat { pub region_stats: Vec<RegionStat>, } -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize)] pub struct RegionStat { pub id: u64, pub catalog: String, pub schema: String, pub table: String, /// The read capacity units during this period - pub rcus: u64, + pub rcus: i64, /// The write capacity units during this period - pub wcus: u64, + pub wcus: i64, /// Approximate bytes of this region - pub approximate_bytes: u64, + pub approximate_bytes: i64, /// Approximate number of rows in this region - pub approximate_rows: u64, + pub approximate_rows: i64, +} + +impl Stat { + pub fn stat_key(&self) -> StatKey { + StatKey { + cluster_id: self.cluster_id, + node_id: self.id, + } + } } impl TryFrom<&HeartbeatRequest> for Stat { @@ -107,3 +119,23 @@ impl From<&api::v1::meta::RegionStat> for RegionStat { } } } + +#[cfg(test)] +mod tests { + use crate::handler::node_stat::Stat; + + #[test] + fn test_stat_key() { + let stat = Stat { + cluster_id: 3, + id: 101, + region_num: 10, + ..Default::default() + }; + + let stat_key = stat.stat_key(); + + assert_eq!(3, stat_key.cluster_id); + assert_eq!(101, stat_key.node_id); + } +} diff --git a/src/meta-srv/src/handler/persist_stats_handler.rs b/src/meta-srv/src/handler/persist_stats_handler.rs index df3010a80f2e..d3c6b21751c6 100644 --- a/src/meta-srv/src/handler/persist_stats_handler.rs +++ b/src/meta-srv/src/handler/persist_stats_handler.rs @@ -12,10 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use api::v1::meta::HeartbeatRequest; +use api::v1::meta::{HeartbeatRequest, PutRequest}; use crate::error::Result; use crate::handler::{HeartbeatAccumulator, HeartbeatHandler}; +use crate::keys::StatValue; use crate::metasrv::Context; #[derive(Default)] @@ -33,8 +34,89 @@ impl HeartbeatHandler for PersistStatsHandler { return Ok(()); } - // TODO(jiachun): remove stats from `acc` and persist to store + let stats = &mut acc.stats; + let key = match stats.get(0) { + Some(stat) => stat.stat_key(), + None => return Ok(()), + }; + + // take stats from &mut acc.stats, avoid clone of vec + let stats = std::mem::take(stats); + + let val = &StatValue { stats }; + + let put = PutRequest { + key: key.into(), + value: val.try_into()?, + ..Default::default() + }; + + ctx.kv_store.put(put).await?; Ok(()) } } + +#[cfg(test)] +mod tests { + use std::sync::atomic::AtomicBool; + use std::sync::Arc; + + use api::v1::meta::RangeRequest; + + use super::*; + use crate::handler::node_stat::Stat; + use crate::keys::StatKey; + use crate::service::store::memory::MemStore; + + #[tokio::test] + async fn test_handle_datanode_stats() { + let kv_store = Arc::new(MemStore::new()); + let ctx = Context { + datanode_lease_secs: 30, + server_addr: "127.0.0.1:0000".to_string(), + kv_store, + election: None, + skip_all: Arc::new(AtomicBool::new(false)), + }; + + let req = HeartbeatRequest::default(); + let mut acc = HeartbeatAccumulator { + stats: vec![Stat { + cluster_id: 3, + id: 101, + region_num: 100, + ..Default::default() + }], + ..Default::default() + }; + + let stats_handler = PersistStatsHandler; + stats_handler.handle(&req, &ctx, &mut acc).await.unwrap(); + + let key = StatKey { + cluster_id: 3, + node_id: 101, + }; + + let req = RangeRequest { + key: key.try_into().unwrap(), + ..Default::default() + }; + + let res = ctx.kv_store.range(req).await.unwrap(); + + assert_eq!(1, res.kvs.len()); + + let kv = &res.kvs[0]; + + let key: StatKey = kv.key.clone().try_into().unwrap(); + assert_eq!(3, key.cluster_id); + assert_eq!(101, key.node_id); + + let val: StatValue = kv.value.clone().try_into().unwrap(); + + assert_eq!(1, val.stats.len()); + assert_eq!(100, val.stats[0].region_num); + } +} diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs index 1efec0c187f4..b4f5b22ab3fd 100644 --- a/src/meta-srv/src/keys.rs +++ b/src/meta-srv/src/keys.rs @@ -23,17 +23,22 @@ use snafu::{ensure, OptionExt, ResultExt}; use crate::error; use crate::error::Result; +use crate::handler::node_stat::Stat; pub(crate) const REMOVED_PREFIX: &str = "__removed"; pub(crate) const DN_LEASE_PREFIX: &str = "__meta_dnlease"; pub(crate) const SEQ_PREFIX: &str = "__meta_seq"; pub(crate) const TABLE_ROUTE_PREFIX: &str = "__meta_table_route"; +pub const DN_STAT_PREFIX: &str = "__meta_dnstat"; + lazy_static! 
{ - static ref DATANODE_KEY_PATTERN: Regex = + static ref DATANODE_LEASE_KEY_PATTERN: Regex = Regex::new(&format!("^{DN_LEASE_PREFIX}-([0-9]+)-([0-9]+)$")).unwrap(); + static ref DATANODE_STAT_KEY_PATTERN: Regex = + Regex::new(&format!("^{DN_STAT_PREFIX}-([0-9]+)-([0-9]+)$")).unwrap(); } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, Hash, PartialEq)] pub struct LeaseKey { pub cluster_id: u64, pub node_id: u64, @@ -43,7 +48,7 @@ impl FromStr for LeaseKey { type Err = error::Error; fn from_str(key: &str) -> Result<Self> { - let caps = DATANODE_KEY_PATTERN + let caps = DATANODE_LEASE_KEY_PATTERN .captures(key) .context(error::InvalidLeaseKeySnafu { key })?; @@ -169,12 +174,135 @@ impl<'a> TableRouteKey<'a> { } } +#[derive(Eq, PartialEq, Debug)] +pub struct StatKey { + pub cluster_id: u64, + pub node_id: u64, +} + +impl From<StatKey> for Vec<u8> { + fn from(value: StatKey) -> Self { + format!("{}-{}-{}", DN_STAT_PREFIX, value.cluster_id, value.node_id).into_bytes() + } +} + +impl FromStr for StatKey { + type Err = error::Error; + + fn from_str(key: &str) -> Result<Self> { + let caps = DATANODE_STAT_KEY_PATTERN + .captures(key) + .context(error::InvalidLeaseKeySnafu { key })?; + + ensure!(caps.len() == 3, error::InvalidLeaseKeySnafu { key }); + + let cluster_id = caps[1].to_string(); + let node_id = caps[2].to_string(); + let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu { + err_msg: format!("invalid cluster_id: {cluster_id}"), + })?; + let node_id: u64 = node_id.parse().context(error::ParseNumSnafu { + err_msg: format!("invalid node_id: {node_id}"), + })?; + + Ok(Self { + cluster_id, + node_id, + }) + } +} + +impl TryFrom<Vec<u8>> for StatKey { + type Error = error::Error; + + fn try_from(bytes: Vec<u8>) -> Result<Self> { + String::from_utf8(bytes) + .context(error::LeaseKeyFromUtf8Snafu {}) + .map(|x| x.parse())? + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct StatValue { + pub stats: Vec<Stat>, +} + +impl TryFrom<&StatValue> for Vec<u8> { + type Error = error::Error; + + fn try_from(stats: &StatValue) -> Result<Self> { + Ok(serde_json::to_string(stats) + .context(crate::error::SerializeToJsonSnafu { + input: format!("{stats:?}"), + })? + .into_bytes()) + } +} + +impl FromStr for StatValue { + type Err = error::Error; + + fn from_str(value: &str) -> Result<Self> { + serde_json::from_str(value).context(error::DeserializeFromJsonSnafu { input: value }) + } +} + +impl TryFrom<Vec<u8>> for StatValue { + type Error = error::Error; + + fn try_from(value: Vec<u8>) -> Result<Self> { + String::from_utf8(value) + .context(error::LeaseKeyFromUtf8Snafu {}) + .map(|x| x.parse())? 
+ } +} + #[cfg(test)] mod tests { use super::*; #[test] - fn test_datanode_lease_key() { + fn test_stat_key_round_trip() { + let key = StatKey { + cluster_id: 0, + node_id: 1, + }; + + let key_bytes: Vec<u8> = key.try_into().unwrap(); + let new_key: StatKey = key_bytes.try_into().unwrap(); + + assert_eq!(0, new_key.cluster_id); + assert_eq!(1, new_key.node_id); + } + + #[test] + fn test_stat_val_round_trip() { + let stat = Stat { + cluster_id: 0, + id: 101, + is_leader: false, + region_num: 100, + ..Default::default() + }; + + let stat_val = &StatValue { stats: vec![stat] }; + + let bytes: Vec<u8> = stat_val.try_into().unwrap(); + let stat_val: StatValue = bytes.try_into().unwrap(); + let stats = stat_val.stats; + + assert_eq!(1, stats.len()); + + let stat = stats.get(0).unwrap(); + assert_eq!(0, stat.cluster_id); + assert_eq!(101, stat.id); + assert!(!stat.is_leader); + assert_eq!(100, stat.region_num); + } + + #[test] + fn test_lease_key_round_trip() { let key = LeaseKey { cluster_id: 0, node_id: 1, @@ -187,7 +315,7 @@ mod tests { } #[test] - fn test_datanode_lease_value() { + fn test_lease_value_round_trip() { let value = LeaseValue { timestamp_millis: 111, node_addr: "127.0.0.1:3002".to_string(), diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs index b0424b4ae2d6..02e671de6232 100644 --- a/src/meta-srv/src/lib.rs +++ b/src/meta-srv/src/lib.rs @@ -17,7 +17,7 @@ pub mod bootstrap; pub mod election; pub mod error; pub mod handler; -mod keys; +pub mod keys; pub mod lease; pub mod metasrv; #[cfg(feature = "mock")] @@ -25,6 +25,6 @@ pub mod mocks; pub mod selector; mod sequence; pub mod service; -mod util; +pub mod util; pub use crate::error::Result;
feat
datanode support report number of regions to meta (#838)
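On the metasrv side, the record above persists each heartbeat's stats under a key of the form `__meta_dnstat-<cluster_id>-<node_id>` with a JSON-encoded `StatValue` body. A condensed sketch of just that key layout (the real conversion lives in `From<StatKey> for Vec<u8>` in `src/meta-srv/src/keys.rs`):

const DN_STAT_PREFIX: &str = "__meta_dnstat";

// Same format as the diff: "<prefix>-<cluster_id>-<node_id>" as raw bytes.
fn stat_key_bytes(cluster_id: u64, node_id: u64) -> Vec<u8> {
    format!("{DN_STAT_PREFIX}-{cluster_id}-{node_id}").into_bytes()
}

fn main() {
    assert_eq!(stat_key_bytes(3, 101), b"__meta_dnstat-3-101".to_vec());
}

Keeping the cluster and node ids in the key lets the persist handler overwrite a datanode's latest stats with a single `PutRequest` and read them back with a prefix range scan.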
494ad570c530305f499fdad966b7fce84dda96f6
2023-05-04 11:54:26
Ning Sun
feat: update pgwire to 0.14 (#1504)
false
diff --git a/Cargo.lock b/Cargo.lock index e02d52fa9fc7..e3dd55c39e5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5824,9 +5824,9 @@ dependencies = [ [[package]] name = "pgwire" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da58f2d096a2b20ee96420524c6156425575b64409a25acdf638bff450cf53a" +checksum = "bd66851a4b1d6631371c931810e453b0319eb260bbd5853ebe98e37b15105b80" dependencies = [ "async-trait", "base64 0.21.0", diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml index 50c5aed855d8..47caff1efe1f 100644 --- a/src/servers/Cargo.toml +++ b/src/servers/Cargo.toml @@ -46,7 +46,7 @@ once_cell = "1.16" openmetrics-parser = "0.4" opensrv-mysql = "0.4" parking_lot = "0.12" -pgwire = "0.13" +pgwire = "0.14" pin-project = "1.0" postgres-types = { version = "0.2", features = ["with-chrono-0_4"] } promql-parser = "0.1.1"
feat
update pgwire to 0.14 (#1504)
fbc8f56eaab7804a2f1b11619b7c2a90e21e1ae1
2023-10-23 14:29:00
Wei
feat: lookup manifest file size (#2590)
false
diff --git a/src/mito2/src/manifest/manager.rs b/src/mito2/src/manifest/manager.rs index e9c85eea3958..653cd7511fc8 100644 --- a/src/mito2/src/manifest/manager.rs +++ b/src/mito2/src/manifest/manager.rs @@ -154,6 +154,12 @@ impl RegionManifestManager { let inner = self.inner.read().await; inner.store.clone() } + + /// Returns total manifest size. + pub async fn manifest_size(&self) -> u64 { + let inner = self.inner.read().await; + inner.total_manifest_size() + } } #[cfg(test)] @@ -186,7 +192,7 @@ impl RegionManifestManagerInner { /// Creates a new manifest. async fn new(metadata: RegionMetadataRef, options: RegionManifestOptions) -> Result<Self> { // construct storage - let store = ManifestObjectStore::new( + let mut store = ManifestObjectStore::new( &options.manifest_dir, options.object_store.clone(), options.compress_type, @@ -232,7 +238,7 @@ impl RegionManifestManagerInner { /// Returns `Ok(None)` if no such manifest. async fn open(options: RegionManifestOptions) -> Result<Option<Self>> { // construct storage - let store = ManifestObjectStore::new( + let mut store = ManifestObjectStore::new( &options.manifest_dir, options.object_store.clone(), options.compress_type, @@ -240,8 +246,9 @@ impl RegionManifestManagerInner { // recover from storage // construct manifest builder + // calculate the manifest size from the latest checkpoint let mut version = MIN_VERSION; - let checkpoint = Self::last_checkpoint(&store).await?; + let checkpoint = Self::last_checkpoint(&mut store).await?; let last_checkpoint_version = checkpoint .as_ref() .map(|checkpoint| checkpoint.last_version) @@ -265,6 +272,8 @@ impl RegionManifestManagerInner { let mut action_iter = store.scan(version, MAX_VERSION).await?; while let Some((manifest_version, raw_action_list)) = action_iter.next_log().await? { let action_list = RegionMetaActionList::decode(&raw_action_list)?; + // set manifest size after last checkpoint + store.set_delta_file_size(manifest_version, raw_action_list.len() as u64); for action in action_list.actions { match action { RegionMetaAction::Change(action) => { @@ -312,6 +321,7 @@ impl RegionManifestManagerInner { Ok(()) } + /// Update the manifest. Return the current manifest version number. async fn update(&mut self, action_list: RegionMetaActionList) -> Result<ManifestVersion> { let version = self.increase_version(); self.store.save(version, &action_list.encode()?).await?; @@ -343,6 +353,11 @@ impl RegionManifestManagerInner { Ok(version) } + + /// Returns total manifest size. + pub(crate) fn total_manifest_size(&self) -> u64 { + self.store.total_manifest_size() + } } impl RegionManifestManagerInner { @@ -369,8 +384,8 @@ impl RegionManifestManagerInner { } /// Make a new checkpoint. Return the fresh one if there are some actions to compact. - async fn do_checkpoint(&self) -> Result<Option<RegionCheckpoint>> { - let last_checkpoint = Self::last_checkpoint(&self.store).await?; + async fn do_checkpoint(&mut self) -> Result<Option<RegionCheckpoint>> { + let last_checkpoint = Self::last_checkpoint(&mut self.store).await?; let current_version = self.last_version; let (start_version, mut manifest_builder) = if let Some(checkpoint) = last_checkpoint { @@ -441,7 +456,7 @@ impl RegionManifestManagerInner { /// Fetch the last [RegionCheckpoint] from storage. 
pub(crate) async fn last_checkpoint( - store: &ManifestObjectStore, + store: &mut ManifestObjectStore, ) -> Result<Option<RegionCheckpoint>> { let last_checkpoint = store.load_last_checkpoint().await?; @@ -456,14 +471,16 @@ impl RegionManifestManagerInner { #[cfg(test)] mod test { + use api::v1::SemanticType; use common_datasource::compression::CompressionType; + use common_test_util::temp_dir::create_temp_dir; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; use super::*; - use crate::manifest::action::RegionChange; + use crate::manifest::action::{RegionChange, RegionEdit}; use crate::manifest::tests::utils::basic_region_metadata; use crate::test_util::TestEnv; @@ -546,4 +563,95 @@ mod test { .unwrap(); manager.validate_manifest(&new_metadata, 1).await; } + + /// Just for test, refer to wal_dir_usage in src/store-api/src/logstore.rs. + async fn manifest_dir_usage(path: &str) -> u64 { + let mut size = 0; + let mut read_dir = tokio::fs::read_dir(path).await.unwrap(); + while let Ok(dir_entry) = read_dir.next_entry().await { + let Some(entry) = dir_entry else { + break; + }; + if entry.file_type().await.unwrap().is_file() { + let file_name = entry.file_name().into_string().unwrap(); + if file_name.contains(".checkpoint") || file_name.contains(".json") { + let file_size = entry.metadata().await.unwrap().len() as usize; + debug!("File: {file_name:?}, size: {file_size}"); + size += file_size; + } + } + } + size as u64 + } + + #[tokio::test] + async fn test_manifest_size() { + let metadata = Arc::new(basic_region_metadata()); + let data_home = create_temp_dir(""); + let data_home_path = data_home.path().to_str().unwrap().to_string(); + let env = TestEnv::with_data_home(data_home); + + let manifest_dir = format!("{}/manifest", data_home_path); + + let manager = env + .create_manifest_manager(CompressionType::Uncompressed, 10, Some(metadata.clone())) + .await + .unwrap() + .unwrap(); + + let mut new_metadata_builder = RegionMetadataBuilder::from_existing((*metadata).clone()); + new_metadata_builder.push_column_metadata(ColumnMetadata { + column_schema: ColumnSchema::new("val2", ConcreteDataType::float64_datatype(), false), + semantic_type: SemanticType::Field, + column_id: 252, + }); + let new_metadata = Arc::new(new_metadata_builder.build().unwrap()); + + let action_list = + RegionMetaActionList::with_action(RegionMetaAction::Change(RegionChange { + metadata: new_metadata.clone(), + })); + + let current_version = manager.update(action_list).await.unwrap(); + assert_eq!(current_version, 1); + manager.validate_manifest(&new_metadata, 1).await; + + // get manifest size + let manifest_size = manager.manifest_size().await; + assert_eq!(manifest_size, manifest_dir_usage(&manifest_dir).await); + + // update 10 times nop_action to trigger checkpoint + for _ in 0..10 { + manager + .update(RegionMetaActionList::new(vec![RegionMetaAction::Edit( + RegionEdit { + files_to_add: vec![], + files_to_remove: vec![], + compaction_time_window: None, + flushed_entry_id: None, + flushed_sequence: None, + }, + )])) + .await + .unwrap(); + } + + // check manifest size again + let manifest_size = manager.manifest_size().await; + assert_eq!(manifest_size, manifest_dir_usage(&manifest_dir).await); + + // Reopen the manager, + // we just calculate the size from the latest checkpoint file + manager.stop().await.unwrap(); + let manager = env + .create_manifest_manager(CompressionType::Uncompressed, 10, None) + .await + .unwrap() + 
.unwrap(); + manager.validate_manifest(&new_metadata, 11).await; + + // get manifest size again + let manifest_size = manager.manifest_size().await; + assert_eq!(manifest_size, 1312); + } } diff --git a/src/mito2/src/manifest/storage.rs b/src/mito2/src/manifest/storage.rs index a0f7dbf9714e..edd63ac52162 100644 --- a/src/mito2/src/manifest/storage.rs +++ b/src/mito2/src/manifest/storage.rs @@ -129,11 +129,22 @@ impl ObjectStoreLogIterator { } } +/// Key to identify a manifest file. +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] +enum FileKey { + /// A delta file (`.json`). + Delta(ManifestVersion), + /// A checkpoint file (`.checkpoint`). + Checkpoint(ManifestVersion), +} + #[derive(Clone, Debug)] pub struct ManifestObjectStore { object_store: ObjectStore, compress_type: CompressionType, path: String, + /// Stores the size of each manifest file. + manifest_size_map: HashMap<FileKey, u64>, } impl ManifestObjectStore { @@ -142,6 +153,7 @@ impl ManifestObjectStore { object_store, compress_type, path: util::normalize_dir(path), + manifest_size_map: HashMap::new(), } } @@ -184,6 +196,7 @@ impl ManifestObjectStore { .context(OpenDalSnafu) } + /// Scan the manifest files in the range of [start, end) and return the iterator. pub async fn scan( &self, start: ManifestVersion, @@ -212,8 +225,12 @@ impl ManifestObjectStore { }) } + /// Delete manifest files that version < end. + /// If keep_last_checkpoint is true, the last checkpoint file will be kept. + /// ### Return + /// The number of deleted files. pub async fn delete_until( - &self, + &mut self, end: ManifestVersion, keep_last_checkpoint: bool, ) -> Result<usize> { @@ -248,7 +265,7 @@ impl ManifestObjectStore { } else { None }; - let paths: Vec<_> = entries + let del_entries: Vec<_> = entries .iter() .filter(|(_e, is_checkpoint, version)| { if let Some(max_version) = checkpoint_version { @@ -264,12 +281,15 @@ impl ManifestObjectStore { true } }) - .map(|e| e.0.path().to_string()) .collect(); + let paths = del_entries + .iter() + .map(|(e, _, _)| e.path().to_string()) + .collect::<Vec<_>>(); let ret = paths.len(); debug!( - "Deleting {} logs from manifest storage path {} until {}, checkpoint: {:?}, paths: {:?}", + "Deleting {} logs from manifest storage path {} until {}, checkpoint_version: {:?}, paths: {:?}", ret, self.path, end, @@ -282,10 +302,21 @@ impl ManifestObjectStore { .await .context(OpenDalSnafu)?; + // delete manifest sizes + for (_, is_checkpoint, version) in &del_entries { + if *is_checkpoint { + self.manifest_size_map + .remove(&FileKey::Checkpoint(*version)); + } else { + self.manifest_size_map.remove(&FileKey::Delta(*version)); + } + } + Ok(ret) } - pub async fn save(&self, version: ManifestVersion, bytes: &[u8]) -> Result<()> { + /// Save the delta manifest file. + pub async fn save(&mut self, version: ManifestVersion, bytes: &[u8]) -> Result<()> { let path = self.delta_file_path(version); debug!("Save log to manifest storage, version: {}", version); let data = self @@ -296,13 +327,17 @@ impl ManifestObjectStore { compress_type: self.compress_type, path: &path, })?; + let delta_size = data.len(); self.object_store .write(&path, data) .await - .context(OpenDalSnafu) + .context(OpenDalSnafu)?; + self.set_delta_file_size(version, delta_size as u64); + Ok(()) } - pub async fn save_checkpoint(&self, version: ManifestVersion, bytes: &[u8]) -> Result<()> { + /// Save the checkpoint manifest file. 
+ pub async fn save_checkpoint(&mut self, version: ManifestVersion, bytes: &[u8]) -> Result<()> { let path = self.checkpoint_file_path(version); let data = self .compress_type @@ -312,10 +347,12 @@ impl ManifestObjectStore { compress_type: self.compress_type, path: &path, })?; + let checkpoint_size = data.len(); self.object_store .write(&path, data) .await .context(OpenDalSnafu)?; + self.set_checkpoint_file_size(version, checkpoint_size as u64); // Because last checkpoint file only contain size and version, which is tiny, so we don't compress it. let last_checkpoint_path = self.last_checkpoint_path(); @@ -342,7 +379,7 @@ impl ManifestObjectStore { } pub async fn load_checkpoint( - &self, + &mut self, version: ManifestVersion, ) -> Result<Option<(ManifestVersion, Vec<u8>)>> { let path = self.checkpoint_file_path(version); @@ -351,12 +388,15 @@ impl ManifestObjectStore { let checkpoint_data = match self.object_store.read(&path).await { Ok(checkpoint) => { + let checkpoint_size = checkpoint.len(); let decompress_data = self.compress_type.decode(checkpoint).await.context( DecompressObjectSnafu { compress_type: self.compress_type, path, }, )?; + // set the checkpoint size + self.set_checkpoint_file_size(version, checkpoint_size as u64); Ok(Some(decompress_data)) } Err(e) => { @@ -373,6 +413,7 @@ impl ManifestObjectStore { ); match self.object_store.read(&fall_back_path).await { Ok(checkpoint) => { + let checkpoint_size = checkpoint.len(); let decompress_data = FALL_BACK_COMPRESS_TYPE .decode(checkpoint) .await @@ -380,6 +421,7 @@ impl ManifestObjectStore { compress_type: FALL_BACK_COMPRESS_TYPE, path, })?; + self.set_checkpoint_file_size(version, checkpoint_size as u64); Ok(Some(decompress_data)) } Err(e) if e.kind() == ErrorKind::NotFound => Ok(None), @@ -398,7 +440,7 @@ impl ManifestObjectStore { /// Load the latest checkpoint. /// Return manifest version and the raw [RegionCheckpoint](crate::manifest::action::RegionCheckpoint) content if any - pub async fn load_last_checkpoint(&self) -> Result<Option<(ManifestVersion, Vec<u8>)>> { + pub async fn load_last_checkpoint(&mut self) -> Result<Option<(ManifestVersion, Vec<u8>)>> { let last_checkpoint_path = self.last_checkpoint_path(); let last_checkpoint_data = match self.object_store.read(&last_checkpoint_path).await { Ok(data) => data, @@ -424,6 +466,22 @@ impl ManifestObjectStore { pub async fn read_file(&self, path: &str) -> Result<Vec<u8>> { self.object_store.read(path).await.context(OpenDalSnafu) } + + /// Compute the size(Byte) in manifest size map. + pub(crate) fn total_manifest_size(&self) -> u64 { + self.manifest_size_map.values().sum() + } + + /// Set the size of the delta file by delta version. + pub(crate) fn set_delta_file_size(&mut self, version: ManifestVersion, size: u64) { + self.manifest_size_map.insert(FileKey::Delta(version), size); + } + + /// Set the size of the checkpoint file by checkpoint version. 
+ pub(crate) fn set_checkpoint_file_size(&mut self, version: ManifestVersion, size: u64) { + self.manifest_size_map + .insert(FileKey::Checkpoint(version), size); + } } #[derive(Serialize, Deserialize, Debug)] @@ -489,7 +547,7 @@ mod tests { test_manifest_log_store_case(log_store).await; } - async fn test_manifest_log_store_case(log_store: ManifestObjectStore) { + async fn test_manifest_log_store_case(mut log_store: ManifestObjectStore) { for v in 0..5 { log_store .save(v, format!("hello, {v}").as_bytes()) @@ -600,4 +658,92 @@ mod tests { let mut it = log_store.scan(0, 10).await.unwrap(); assert!(it.next_log().await.unwrap().is_none()); } + + #[tokio::test] + async fn test_file_version() { + let version = file_version("00000000000000000007.checkpoint"); + assert_eq!(version, 7); + + let name = delta_file(version); + assert_eq!(name, "00000000000000000007.json"); + + let name = checkpoint_file(version); + assert_eq!(name, "00000000000000000007.checkpoint"); + } + + #[tokio::test] + async fn test_uncompressed_manifest_files_size() { + let mut log_store = new_test_manifest_store(); + // write 5 manifest files with uncompressed(8B per file) + log_store.compress_type = CompressionType::Uncompressed; + for v in 0..5 { + log_store + .save(v, format!("hello, {v}").as_bytes()) + .await + .unwrap(); + } + // write 1 checkpoint file with uncompressed(23B) + log_store + .save_checkpoint(5, "checkpoint_uncompressed".as_bytes()) + .await + .unwrap(); + + // manifest files size + assert_eq!(log_store.total_manifest_size(), 63); + + // delete 3 manifest files + assert_eq!(log_store.delete_until(3, false).await.unwrap(), 3); + + // manifest files size after delete + assert_eq!(log_store.total_manifest_size(), 39); + + // delete all manifest files + assert_eq!( + log_store + .delete_until(ManifestVersion::MAX, false) + .await + .unwrap(), + 3 + ); + + assert_eq!(log_store.total_manifest_size(), 0); + } + + #[tokio::test] + async fn test_compressed_manifest_files_size() { + let mut log_store = new_test_manifest_store(); + // Test with compressed manifest files + log_store.compress_type = CompressionType::Gzip; + // write 5 manifest files + for v in 0..5 { + log_store + .save(v, format!("hello, {v}").as_bytes()) + .await + .unwrap(); + } + log_store + .save_checkpoint(5, "checkpoint_compressed".as_bytes()) + .await + .unwrap(); + + // manifest files size + assert_eq!(log_store.total_manifest_size(), 181); + + // delete 3 manifest files + assert_eq!(log_store.delete_until(3, false).await.unwrap(), 3); + + // manifest files size after delete + assert_eq!(log_store.total_manifest_size(), 97); + + // delete all manifest files + assert_eq!( + log_store + .delete_until(ManifestVersion::MAX, false) + .await + .unwrap(), + 3 + ); + + assert_eq!(log_store.total_manifest_size(), 0); + } } diff --git a/src/mito2/src/manifest/tests/checkpoint.rs b/src/mito2/src/manifest/tests/checkpoint.rs index 68c7063e1e63..c28f6cd6d598 100644 --- a/src/mito2/src/manifest/tests/checkpoint.rs +++ b/src/mito2/src/manifest/tests/checkpoint.rs @@ -202,7 +202,7 @@ async fn generate_checkpoint_with_compression_types( manager.update(action).await.unwrap(); } - RegionManifestManagerInner::last_checkpoint(&manager.store().await) + RegionManifestManagerInner::last_checkpoint(&mut manager.store().await) .await .unwrap() .unwrap() diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs index c9621249212c..d7cb13e5121b 100644 --- a/src/mito2/src/test_util.rs +++ b/src/mito2/src/test_util.rs @@ -99,6 +99,15 @@ impl TestEnv { } } + /// 
Returns a new env with specific `data_home` for test. + pub fn with_data_home(data_home: TempDir) -> TestEnv { + TestEnv { + data_home, + logstore: None, + object_store: None, + } + } + pub fn get_logstore(&self) -> Option<Arc<RaftEngineLogStore>> { self.logstore.clone() }
feat
lookup manifest file size (#2590)
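The manifest-size commit above works by recording the byte size of every delta (`.json`) and checkpoint (`.checkpoint`) file in an in-memory map and summing that map on demand. Below is a minimal standalone sketch of that bookkeeping, assuming a simplified stand-in for `ManifestObjectStore` (the `ManifestSizeMap` type is illustrative only and not part of the codebase):

use std::collections::HashMap;

type ManifestVersion = u64;

/// Key identifying a manifest file, mirroring the `FileKey` enum in the diff.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum FileKey {
    Delta(ManifestVersion),
    Checkpoint(ManifestVersion),
}

/// Simplified size bookkeeping, standing in for `ManifestObjectStore`.
#[derive(Default)]
struct ManifestSizeMap {
    sizes: HashMap<FileKey, u64>,
}

impl ManifestSizeMap {
    fn set_delta_file_size(&mut self, version: ManifestVersion, size: u64) {
        self.sizes.insert(FileKey::Delta(version), size);
    }

    fn set_checkpoint_file_size(&mut self, version: ManifestVersion, size: u64) {
        self.sizes.insert(FileKey::Checkpoint(version), size);
    }

    /// Drops entries for files deleted before `end`, like `delete_until` does.
    fn remove_until(&mut self, end: ManifestVersion) {
        self.sizes.retain(|key, _| match key {
            FileKey::Delta(v) | FileKey::Checkpoint(v) => *v >= end,
        });
    }

    /// Total manifest size in bytes, as reported by `total_manifest_size`.
    fn total_manifest_size(&self) -> u64 {
        self.sizes.values().sum()
    }
}

fn main() {
    let mut sizes = ManifestSizeMap::default();
    for v in 0..5 {
        sizes.set_delta_file_size(v, 8); // five 8-byte delta files
    }
    sizes.set_checkpoint_file_size(5, 23); // one 23-byte checkpoint
    assert_eq!(sizes.total_manifest_size(), 63);
    sizes.remove_until(3);
    assert_eq!(sizes.total_manifest_size(), 39);
}

The sizes in `main` mirror the uncompressed test in the diff: five 8-byte delta files plus a 23-byte checkpoint sum to 63 bytes, and dropping versions below 3 leaves 39.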
b144836935ef580283b6d4fbe5fbe462538a5822
2024-02-23 12:50:55
Yingwen
feat: Implement write and fork for the new memtable (#3357)
false
diff --git a/src/mito2/src/memtable/merge_tree.rs b/src/mito2/src/memtable/merge_tree.rs index be5db7c6a36e..4ab65a6e97fd 100644 --- a/src/mito2/src/memtable/merge_tree.rs +++ b/src/mito2/src/memtable/merge_tree.rs @@ -262,3 +262,85 @@ impl MemtableBuilder for MergeTreeMemtableBuilder { )) } } + +#[cfg(test)] +mod tests { + use common_time::Timestamp; + + use super::*; + use crate::test_util::memtable_util; + + #[test] + fn test_memtable_sorted_input() { + write_sorted_input(true); + write_sorted_input(false); + } + + fn write_sorted_input(has_pk: bool) { + let metadata = if has_pk { + memtable_util::metadata_with_primary_key(vec![1, 0], true) + } else { + memtable_util::metadata_with_primary_key(vec![], false) + }; + let timestamps = (0..100).collect::<Vec<_>>(); + let kvs = + memtable_util::build_key_values(&metadata, "hello".to_string(), 42, &timestamps, 1); + let memtable = MergeTreeMemtable::new(1, metadata, None, &MergeTreeConfig::default()); + memtable.write(&kvs).unwrap(); + + // TODO(yingwen): Test iter. + + let stats = memtable.stats(); + assert!(stats.bytes_allocated() > 0); + assert_eq!( + Some(( + Timestamp::new_millisecond(0), + Timestamp::new_millisecond(99) + )), + stats.time_range() + ); + } + + #[test] + fn test_memtable_unsorted_input() { + write_iter_unsorted_input(true); + write_iter_unsorted_input(false); + } + + fn write_iter_unsorted_input(has_pk: bool) { + let metadata = if has_pk { + memtable_util::metadata_with_primary_key(vec![1, 0], true) + } else { + memtable_util::metadata_with_primary_key(vec![], false) + }; + let memtable = + MergeTreeMemtable::new(1, metadata.clone(), None, &MergeTreeConfig::default()); + + let kvs = memtable_util::build_key_values( + &metadata, + "hello".to_string(), + 0, + &[1, 3, 7, 5, 6], + 0, // sequence 0, 1, 2, 3, 4 + ); + memtable.write(&kvs).unwrap(); + + let kvs = memtable_util::build_key_values( + &metadata, + "hello".to_string(), + 0, + &[5, 2, 4, 0, 7], + 5, // sequence 5, 6, 7, 8, 9 + ); + memtable.write(&kvs).unwrap(); + + // TODO(yingwen): Test iter. + + let stats = memtable.stats(); + assert!(stats.bytes_allocated() > 0); + assert_eq!( + Some((Timestamp::new_millisecond(0), Timestamp::new_millisecond(7))), + stats.time_range() + ); + } +} diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs index 96db38f673f4..f414ca38f99a 100644 --- a/src/mito2/src/memtable/merge_tree/data.rs +++ b/src/mito2/src/memtable/merge_tree/data.rs @@ -43,10 +43,13 @@ use crate::error; use crate::error::Result; use crate::memtable::key_values::KeyValue; use crate::memtable::merge_tree::merger::{DataNode, DataSource, Merger}; -use crate::memtable::merge_tree::{PkId, PkIndex}; +use crate::memtable::merge_tree::PkIndex; const PK_INDEX_COLUMN_NAME: &str = "__pk_index"; +/// Initial capacity for the data buffer. +pub(crate) const DATA_INIT_CAP: usize = 8; + /// Data part batches returns by `DataParts::read`. #[derive(Debug, Clone)] pub struct DataBatch { @@ -128,9 +131,9 @@ impl DataBuffer { } /// Writes a row to data buffer. - pub fn write_row(&mut self, pk_id: PkId, kv: KeyValue) { + pub fn write_row(&mut self, pk_index: PkIndex, kv: KeyValue) { self.ts_builder.push_value_ref(kv.timestamp()); - self.pk_index_builder.push(Some(pk_id.pk_index)); + self.pk_index_builder.push(Some(pk_index)); self.sequence_builder.push(Some(kv.sequence())); self.op_type_builder.push(Some(kv.op_type() as u8)); @@ -662,9 +665,9 @@ pub struct ParquetPart { /// Data parts under a shard. 
pub struct DataParts { /// The active writing buffer. - pub(crate) active: DataBuffer, + active: DataBuffer, /// immutable (encoded) parts. - pub(crate) frozen: Vec<DataPart>, + frozen: Vec<DataPart>, } impl DataParts { @@ -675,9 +678,14 @@ impl DataParts { } } - /// Writes one row into active part. - pub fn write_row(&mut self, pk_id: PkId, kv: KeyValue) { - self.active.write_row(pk_id, kv) + pub(crate) fn with_frozen(mut self, frozen: Vec<DataPart>) -> Self { + self.frozen = frozen; + self + } + + /// Writes a row into parts. + pub fn write_row(&mut self, pk_index: PkIndex, kv: KeyValue) { + self.active.write_row(pk_index, kv) } /// Freezes the active data buffer into frozen data parts. @@ -932,13 +940,7 @@ mod tests { ); for kv in kvs.iter() { - buffer.write_row( - PkId { - shard_id: 0, - pk_index, - }, - kv, - ); + buffer.write_row(pk_index, kv); } } diff --git a/src/mito2/src/memtable/merge_tree/dict.rs b/src/mito2/src/memtable/merge_tree/dict.rs index 43a53bd494f6..5c1c3c3a57f6 100644 --- a/src/mito2/src/memtable/merge_tree/dict.rs +++ b/src/mito2/src/memtable/merge_tree/dict.rs @@ -61,19 +61,21 @@ impl KeyDictBuilder { self.pk_to_index.get(key).copied() } + /// Returns true if the builder is full. + pub fn is_full(&self) -> bool { + self.num_keys >= self.capacity + } + /// Adds the key to the builder and returns its index if the builder is not full. /// - /// Returns `None` if the builder is full. - pub fn try_insert_key(&mut self, key: &[u8], metrics: &mut WriteMetrics) -> Option<PkIndex> { + /// # Panics + /// Panics if the builder is full. + pub fn insert_key(&mut self, key: &[u8], metrics: &mut WriteMetrics) -> PkIndex { + assert!(!self.is_full()); + if let Some(pk_index) = self.pk_to_index.get(key).copied() { // Already in the builder. - return Some(pk_index); - } - - // A new key. - if self.num_keys >= self.capacity { - // The builder is full. - return None; + return pk_index; } if self.key_buffer.len() >= MAX_KEYS_PER_BLOCK.into() { @@ -91,7 +93,7 @@ impl KeyDictBuilder { metrics.key_bytes += key.len() * 2; self.key_bytes_in_index += key.len(); - Some(pk_index) + pk_index } /// Memory size of the builder. @@ -129,11 +131,12 @@ impl KeyDictBuilder { pk_to_index, dict_blocks: std::mem::take(&mut self.dict_blocks), key_positions, + key_bytes_in_index: self.key_bytes_in_index, }) } - /// Scans the builder. - pub fn scan(&self) -> DictBuilderReader { + /// Reads the builder. + pub fn read(&self) -> DictBuilderReader { let sorted_pk_indices = self.pk_to_index.values().copied().collect(); let block = self.key_buffer.finish_cloned(); let mut blocks = Vec::with_capacity(self.dict_blocks.len() + 1); @@ -162,38 +165,46 @@ impl DictBuilderReader { } } - /// Returns true if the item in the reader is valid. - pub fn is_valid(&self) -> bool { - self.offset < self.sorted_pk_indices.len() + /// Returns the number of keys. + pub fn num_keys(&self) -> usize { + self.sorted_pk_indices.len() + } + + /// Gets the i-th pk index. + pub fn pk_index(&self, offset: usize) -> PkIndex { + self.sorted_pk_indices[offset] } - /// Returns current key. - pub fn current_key(&self) -> &[u8] { - let pk_index = self.current_pk_index(); + /// Gets the i-th key. + pub fn key(&self, offset: usize) -> &[u8] { + let pk_index = self.pk_index(offset); self.key_by_pk_index(pk_index) } - /// Returns current [PkIndex] of the key. - pub fn current_pk_index(&self) -> PkIndex { - assert!(self.is_valid()); - self.sorted_pk_indices[self.offset] + /// Gets the key by the pk index. 
+ pub fn key_by_pk_index(&self, pk_index: PkIndex) -> &[u8] { + let block_idx = pk_index / MAX_KEYS_PER_BLOCK; + self.blocks[block_idx as usize].key_by_pk_index(pk_index) } - /// Advances the reader. - pub fn next(&mut self) { - assert!(self.is_valid()); - self.offset += 1; + /// Returns pk weights to sort a data part and replaces pk indices. + pub(crate) fn pk_weights_to_sort_data(&self) -> Vec<u16> { + compute_pk_weights(&self.sorted_pk_indices) } /// Returns pk indices sorted by keys. pub(crate) fn sorted_pk_index(&self) -> &[PkIndex] { &self.sorted_pk_indices } +} - fn key_by_pk_index(&self, pk_index: PkIndex) -> &[u8] { - let block_idx = pk_index / MAX_KEYS_PER_BLOCK; - self.blocks[block_idx as usize].key_by_pk_index(pk_index) +/// Returns pk weights to sort a data part and replaces pk indices. +fn compute_pk_weights(sorted_pk_indices: &[PkIndex]) -> Vec<u16> { + let mut pk_weights = vec![0; sorted_pk_indices.len()]; + for (weight, pk_index) in sorted_pk_indices.iter().enumerate() { + pk_weights[*pk_index as usize] = weight as u16; } + pk_weights } /// A key dictionary. @@ -206,6 +217,7 @@ pub struct KeyDict { dict_blocks: Vec<DictBlock>, /// Maps pk index to position of the key in [Self::dict_blocks]. key_positions: Vec<PkIndex>, + key_bytes_in_index: usize, } pub type KeyDictRef = Arc<KeyDict>; @@ -220,6 +232,21 @@ impl KeyDict { let block_index = position / MAX_KEYS_PER_BLOCK; self.dict_blocks[block_index as usize].key_by_pk_index(position) } + + /// Gets the pk index by the key. + pub fn get_pk_index(&self, key: &[u8]) -> Option<PkIndex> { + self.pk_to_index.get(key).copied() + } + + /// Returns pk weights to sort a data part and replaces pk indices. + pub(crate) fn pk_weights_to_sort_data(&self) -> Vec<u16> { + compute_pk_weights(&self.key_positions) + } + + /// Returns the shared memory size. + pub(crate) fn shared_memory_size(&self) -> usize { + self.key_bytes_in_index + } } /// Buffer to store unsorted primary keys. @@ -364,7 +391,8 @@ mod tests { let mut last_pk_index = None; let mut metrics = WriteMetrics::default(); for key in &keys { - let pk_index = builder.try_insert_key(key, &mut metrics).unwrap(); + assert!(!builder.is_full()); + let pk_index = builder.insert_key(key, &mut metrics); last_pk_index = Some(pk_index); } assert_eq!(num_keys - 1, last_pk_index.unwrap()); @@ -379,10 +407,9 @@ mod tests { expect.sort_unstable_by(|a, b| a.0.cmp(&b.0)); let mut result = Vec::with_capacity(expect.len()); - let mut reader = builder.scan(); - while reader.is_valid() { - result.push((reader.current_key().to_vec(), reader.current_pk_index())); - reader.next(); + let reader = builder.read(); + for i in 0..reader.num_keys() { + result.push((reader.key(i).to_vec(), reader.pk_index(i))); } assert_eq!(expect, result); } @@ -397,9 +424,7 @@ mod tests { for i in 0..num_keys { // Each key is 5 bytes. 
let key = format!("{i:05}"); - builder - .try_insert_key(key.as_bytes(), &mut metrics) - .unwrap(); + builder.insert_key(key.as_bytes(), &mut metrics); } // num_keys * 5 * 2 assert_eq!(5130, metrics.key_bytes); diff --git a/src/mito2/src/memtable/merge_tree/merger.rs b/src/mito2/src/memtable/merge_tree/merger.rs index 7f54183cdd91..c758d3ecd909 100644 --- a/src/mito2/src/memtable/merge_tree/merger.rs +++ b/src/mito2/src/memtable/merge_tree/merger.rs @@ -407,7 +407,6 @@ mod tests { use super::*; use crate::memtable::merge_tree::data::DataBuffer; - use crate::memtable::merge_tree::PkId; use crate::test_util::memtable_util::{build_key_values_with_ts_seq_values, metadata_for_test}; fn write_rows_to_buffer( @@ -429,13 +428,7 @@ mod tests { ); for kv in kvs.iter() { - buffer.write_row( - PkId { - shard_id: 0, - pk_index, - }, - kv, - ); + buffer.write_row(pk_index, kv); } *sequence += rows; diff --git a/src/mito2/src/memtable/merge_tree/partition.rs b/src/mito2/src/memtable/merge_tree/partition.rs index 69c92ff69f3a..dc817d134ded 100644 --- a/src/mito2/src/memtable/merge_tree/partition.rs +++ b/src/mito2/src/memtable/merge_tree/partition.rs @@ -26,6 +26,7 @@ use store_api::storage::ColumnId; use crate::error::Result; use crate::memtable::key_values::KeyValue; +use crate::memtable::merge_tree::data::{DataParts, DATA_INIT_CAP}; use crate::memtable::merge_tree::metrics::WriteMetrics; use crate::memtable::merge_tree::shard::Shard; use crate::memtable::merge_tree::shard_builder::ShardBuilder; @@ -41,8 +42,12 @@ pub struct Partition { impl Partition { /// Creates a new partition. - pub fn new(_metadata: RegionMetadataRef, _config: &MergeTreeConfig) -> Self { - unimplemented!() + pub fn new(metadata: RegionMetadataRef, config: &MergeTreeConfig) -> Self { + let shard_builder = ShardBuilder::new(metadata.clone(), config); + + Partition { + inner: RwLock::new(Inner::new(metadata, shard_builder)), + } } /// Writes to the partition with a primary key. @@ -56,40 +61,37 @@ impl Partition { // Now we ensure one key only exists in one shard. if let Some(pk_id) = inner.find_key_in_shards(primary_key) { // Key already in shards. - return inner.write_to_shard(pk_id, key_value); + inner.write_to_shard(pk_id, key_value); + return Ok(()); } if inner.shard_builder.should_freeze() { - let shard_id = inner.active_shard_id; - let shard = inner.shard_builder.finish(shard_id)?; - inner.active_shard_id += 1; - inner.shards.push(shard); + inner.freeze_active_shard()?; } // Write to the shard builder. inner .shard_builder - .write_with_key(primary_key, key_value, metrics)?; + .write_with_key(primary_key, key_value, metrics); + inner.num_rows += 1; Ok(()) } /// Writes to the partition without a primary key. - pub fn write_no_key(&self, key_value: KeyValue, metrics: &mut WriteMetrics) -> Result<()> { + pub fn write_no_key(&self, key_value: KeyValue) { let mut inner = self.inner.write().unwrap(); // If no primary key, always write to the first shard. - if inner.shards.is_empty() { - let shard_id = inner.active_shard_id; - inner.shards.push(Shard::new_no_dict(shard_id)); - inner.active_shard_id += 1; - } + debug_assert!(!inner.shards.is_empty()); + debug_assert_eq!(1, inner.active_shard_id); // A dummy pk id. let pk_id = PkId { - shard_id: inner.active_shard_id - 1, + shard_id: 0, pk_index: 0, }; - inner.shards[0].write_key_value(pk_id, key_value, metrics) + inner.shards[0].write_with_pk_id(pk_id, key_value); + inner.num_rows += 1; } /// Scans data in the partition. 
@@ -103,22 +105,47 @@ impl Partition { /// Freezes the partition. pub fn freeze(&self) -> Result<()> { - unimplemented!() + let mut inner = self.inner.write().unwrap(); + inner.freeze_active_shard()?; + Ok(()) } /// Forks the partition. - pub fn fork(&self, _metadata: &RegionMetadataRef) -> Partition { - unimplemented!() + pub fn fork(&self, metadata: &RegionMetadataRef, config: &MergeTreeConfig) -> Partition { + let inner = self.inner.read().unwrap(); + // TODO(yingwen): TTL or evict shards. + let shard_builder = ShardBuilder::new(metadata.clone(), config); + let shards = inner + .shards + .iter() + .map(|shard| shard.fork(metadata.clone())) + .collect(); + + Partition { + inner: RwLock::new(Inner { + metadata: metadata.clone(), + shard_builder, + active_shard_id: inner.active_shard_id, + shards, + num_rows: 0, + }), + } } /// Returns true if the partition has data. pub fn has_data(&self) -> bool { - unimplemented!() + let inner = self.inner.read().unwrap(); + inner.num_rows > 0 } /// Returns shared memory size of the partition. pub fn shared_memory_size(&self) -> usize { - unimplemented!() + let inner = self.inner.read().unwrap(); + inner + .shards + .iter() + .map(|shard| shard.shared_memory_size()) + .sum() } /// Get partition key from the key value. @@ -160,17 +187,37 @@ pub type PartitionRef = Arc<Partition>; /// /// A key only exists in one shard. struct Inner { + metadata: RegionMetadataRef, /// Shard whose dictionary is active. shard_builder: ShardBuilder, active_shard_id: ShardId, - /// Shards with frozon dictionary. + /// Shards with frozen dictionary. shards: Vec<Shard>, + num_rows: usize, } impl Inner { + fn new(metadata: RegionMetadataRef, shard_builder: ShardBuilder) -> Self { + let mut inner = Self { + metadata, + shard_builder, + active_shard_id: 0, + shards: Vec::new(), + num_rows: 0, + }; + + if inner.metadata.primary_key.is_empty() { + let data_parts = DataParts::new(inner.metadata.clone(), DATA_INIT_CAP); + inner.shards.push(Shard::new(0, None, data_parts)); + inner.active_shard_id = 1; + } + + inner + } + fn find_key_in_shards(&self, primary_key: &[u8]) -> Option<PkId> { for shard in &self.shards { - if let Some(pkid) = shard.find_key(primary_key) { + if let Some(pkid) = shard.find_id_by_key(primary_key) { return Some(pkid); } } @@ -178,7 +225,24 @@ impl Inner { None } - fn write_to_shard(&mut self, _pk_id: PkId, _key_value: KeyValue) -> Result<()> { - unimplemented!() + fn write_to_shard(&mut self, pk_id: PkId, key_value: KeyValue) { + for shard in &mut self.shards { + if shard.shard_id == pk_id.shard_id { + shard.write_with_pk_id(pk_id, key_value); + self.num_rows += 1; + return; + } + } + } + + fn freeze_active_shard(&mut self) -> Result<()> { + if let Some(shard) = self + .shard_builder + .finish(self.active_shard_id, self.metadata.clone())? + { + self.active_shard_id += 1; + self.shards.push(shard); + } + Ok(()) } } diff --git a/src/mito2/src/memtable/merge_tree/shard.rs b/src/mito2/src/memtable/merge_tree/shard.rs index 9eceb4920130..86c5ea18f1a2 100644 --- a/src/mito2/src/memtable/merge_tree/shard.rs +++ b/src/mito2/src/memtable/merge_tree/shard.rs @@ -14,21 +14,16 @@ //! Shard in a partition. 
-use std::collections::HashSet; +use store_api::metadata::RegionMetadataRef; -use common_recordbatch::filter::SimpleFilterEvaluator; -use store_api::storage::ColumnId; - -use crate::error::Result; use crate::memtable::key_values::KeyValue; -use crate::memtable::merge_tree::data::DataParts; +use crate::memtable::merge_tree::data::{DataParts, DATA_INIT_CAP}; use crate::memtable::merge_tree::dict::KeyDictRef; -use crate::memtable::merge_tree::metrics::WriteMetrics; use crate::memtable::merge_tree::{PkId, ShardId}; /// Shard stores data related to the same key dictionary. pub struct Shard { - shard_id: ShardId, + pub(crate) shard_id: ShardId, /// Key dictionary of the shard. `None` if the schema of the tree doesn't have a primary key. key_dict: Option<KeyDictRef>, /// Data in the shard. @@ -36,35 +31,153 @@ pub struct Shard { } impl Shard { - /// Returns a shard without dictionary. - pub fn new_no_dict(_shard_id: ShardId) -> Shard { - unimplemented!() + /// Returns a new shard. + pub fn new(shard_id: ShardId, key_dict: Option<KeyDictRef>, data_parts: DataParts) -> Shard { + Shard { + shard_id, + key_dict, + data_parts, + } } /// Returns the pk id of the key if it exists. - pub fn find_key(&self, _key: &[u8]) -> Option<PkId> { - unimplemented!() + pub fn find_id_by_key(&self, key: &[u8]) -> Option<PkId> { + let key_dict = self.key_dict.as_ref()?; + let pk_index = key_dict.get_pk_index(key)?; + + Some(PkId { + shard_id: self.shard_id, + pk_index, + }) } /// Writes a key value into the shard. - pub fn write_key_value( - &mut self, - _pk_id: PkId, - _key_value: KeyValue, - _metrics: &mut WriteMetrics, - ) -> Result<()> { - unimplemented!() + pub fn write_with_pk_id(&mut self, pk_id: PkId, key_value: KeyValue) { + debug_assert_eq!(self.shard_id, pk_id.shard_id); + + self.data_parts.write_row(pk_id.pk_index, key_value); } /// Scans the shard. - pub fn scan( - &self, - _projection: &HashSet<ColumnId>, - _filters: &[SimpleFilterEvaluator], - ) -> ShardReader { + // TODO(yingwen): Push down projection to data parts. + pub fn scan(&self) -> ShardReader { unimplemented!() } + + /// Returns the memory size of the shard part. + pub fn shared_memory_size(&self) -> usize { + self.key_dict + .as_ref() + .map(|dict| dict.shared_memory_size()) + .unwrap_or(0) + } + + /// Forks a shard. + pub fn fork(&self, metadata: RegionMetadataRef) -> Shard { + Shard { + shard_id: self.shard_id, + key_dict: self.key_dict.clone(), + data_parts: DataParts::new(metadata, DATA_INIT_CAP), + } + } } /// Reader to read rows in a shard. 
pub struct ShardReader {} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::*; + use crate::memtable::merge_tree::dict::KeyDictBuilder; + use crate::memtable::merge_tree::metrics::WriteMetrics; + use crate::memtable::merge_tree::PkIndex; + use crate::memtable::KeyValues; + use crate::test_util::memtable_util::{ + build_key_values_with_ts_seq_values, encode_key, encode_key_by_kv, encode_keys, + metadata_for_test, + }; + + fn input_with_key(metadata: &RegionMetadataRef) -> Vec<KeyValues> { + vec![ + build_key_values_with_ts_seq_values( + metadata, + "shard".to_string(), + 2, + [20, 21].into_iter(), + [Some(0.0), Some(1.0)].into_iter(), + 0, + ), + build_key_values_with_ts_seq_values( + metadata, + "shard".to_string(), + 0, + [0, 1].into_iter(), + [Some(0.0), Some(1.0)].into_iter(), + 1, + ), + build_key_values_with_ts_seq_values( + metadata, + "shard".to_string(), + 1, + [10, 11].into_iter(), + [Some(0.0), Some(1.0)].into_iter(), + 2, + ), + ] + } + + fn new_shard_with_dict( + shard_id: ShardId, + metadata: RegionMetadataRef, + input: &[KeyValues], + ) -> Shard { + let mut dict_builder = KeyDictBuilder::new(1024); + let mut metrics = WriteMetrics::default(); + let mut keys = Vec::with_capacity(input.len()); + for kvs in input { + encode_keys(&metadata, kvs, &mut keys); + } + for key in &keys { + dict_builder.insert_key(key, &mut metrics); + } + + let dict = dict_builder.finish().unwrap(); + let data_parts = DataParts::new(metadata, DATA_INIT_CAP); + + Shard::new(shard_id, Some(Arc::new(dict)), data_parts) + } + + #[test] + fn test_shard_find_by_key() { + let metadata = metadata_for_test(); + let input = input_with_key(&metadata); + let shard = new_shard_with_dict(8, metadata, &input); + for i in 0..input.len() { + let key = encode_key("shard", i as u32); + assert_eq!( + PkId { + shard_id: 8, + pk_index: i as PkIndex, + }, + shard.find_id_by_key(&key).unwrap() + ); + } + assert!(shard.find_id_by_key(&encode_key("shard", 100)).is_none()); + } + + #[test] + fn test_write_shard() { + let metadata = metadata_for_test(); + let input = input_with_key(&metadata); + let mut shard = new_shard_with_dict(8, metadata, &input); + for key_values in &input { + for kv in key_values.iter() { + let key = encode_key_by_kv(&kv); + let pk_id = shard.find_id_by_key(&key).unwrap(); + shard.write_with_pk_id(pk_id, kv); + } + } + } +} diff --git a/src/mito2/src/memtable/merge_tree/shard_builder.rs b/src/mito2/src/memtable/merge_tree/shard_builder.rs index c8d78029043c..f9a32a17a563 100644 --- a/src/mito2/src/memtable/merge_tree/shard_builder.rs +++ b/src/mito2/src/memtable/merge_tree/shard_builder.rs @@ -14,13 +14,20 @@ //! Builder of a shard. +use std::collections::HashSet; +use std::sync::Arc; + +use common_recordbatch::filter::SimpleFilterEvaluator; +use store_api::metadata::RegionMetadataRef; +use store_api::storage::ColumnId; + use crate::error::Result; use crate::memtable::key_values::KeyValue; -use crate::memtable::merge_tree::data::DataBuffer; +use crate::memtable::merge_tree::data::{DataBuffer, DataParts, DATA_INIT_CAP}; use crate::memtable::merge_tree::dict::KeyDictBuilder; use crate::memtable::merge_tree::metrics::WriteMetrics; use crate::memtable::merge_tree::shard::Shard; -use crate::memtable::merge_tree::ShardId; +use crate::memtable::merge_tree::{MergeTreeConfig, ShardId}; /// Builder to write keys and data to a shard that the key dictionary /// is still active. @@ -29,43 +36,155 @@ pub struct ShardBuilder { dict_builder: KeyDictBuilder, /// Buffer to store data. 
data_buffer: DataBuffer, - /// Max keys in an index shard. - index_max_keys_per_shard: usize, /// Number of rows to freeze a data part. data_freeze_threshold: usize, } impl ShardBuilder { - /// Write a key value with its encoded primary key. - pub fn write_with_key( - &mut self, - _key: &[u8], - _key_value: KeyValue, - _metrics: &mut WriteMetrics, - ) -> Result<()> { - unimplemented!() + /// Returns a new builder. + pub fn new(metadata: RegionMetadataRef, config: &MergeTreeConfig) -> ShardBuilder { + ShardBuilder { + dict_builder: KeyDictBuilder::new(config.index_max_keys_per_shard), + data_buffer: DataBuffer::with_capacity(metadata, DATA_INIT_CAP), + data_freeze_threshold: config.data_freeze_threshold, + } } - /// Returns true if the builder is empty. - pub fn is_empty(&self) -> bool { - unimplemented!() + /// Write a key value with its encoded primary key. + pub fn write_with_key(&mut self, key: &[u8], key_value: KeyValue, metrics: &mut WriteMetrics) { + // Safety: we check whether the builder need to freeze before. + let pk_index = self.dict_builder.insert_key(key, metrics); + self.data_buffer.write_row(pk_index, key_value); } /// Returns true if the builder need to freeze. pub fn should_freeze(&self) -> bool { - unimplemented!() + self.dict_builder.is_full() || self.data_buffer.num_rows() == self.data_freeze_threshold } /// Builds a new shard and resets the builder. - pub fn finish(&mut self, _shard_id: ShardId) -> Result<Shard> { - unimplemented!() + /// + /// Returns `None` if the builder is empty. + pub fn finish( + &mut self, + shard_id: ShardId, + metadata: RegionMetadataRef, + ) -> Result<Option<Shard>> { + if self.data_buffer.is_empty() { + return Ok(None); + } + + let key_dict = self.dict_builder.finish(); + let data_part = match &key_dict { + Some(dict) => { + let pk_weights = dict.pk_weights_to_sort_data(); + self.data_buffer.freeze(&pk_weights)? + } + None => { + let pk_weights = [0]; + self.data_buffer.freeze(&pk_weights)? + } + }; + + // build data parts. + let data_parts = DataParts::new(metadata, DATA_INIT_CAP).with_frozen(vec![data_part]); + let key_dict = key_dict.map(Arc::new); + + Ok(Some(Shard::new(shard_id, key_dict, data_parts))) } - /// Scans the shard builder - pub fn scan(&mut self, _shard_id: ShardId) -> Result<ShardBuilderReader> { + /// Scans the shard builder. + pub fn scan( + &mut self, + _projection: &HashSet<ColumnId>, + _filters: &[SimpleFilterEvaluator], + ) -> Result<ShardBuilderReader> { unimplemented!() } } -/// Reader to scan a shard. builder. +/// Reader to scan a shard builder. pub struct ShardBuilderReader {} + +// TODO(yingwen): Can we use generic for data reader? 
+ +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::*; + use crate::memtable::merge_tree::dict::KeyDictBuilder; + use crate::memtable::merge_tree::metrics::WriteMetrics; + use crate::memtable::KeyValues; + use crate::test_util::memtable_util::{ + build_key_values_with_ts_seq_values, encode_key_by_kv, encode_keys, metadata_for_test, + }; + + fn input_with_key(metadata: &RegionMetadataRef) -> Vec<KeyValues> { + vec![ + build_key_values_with_ts_seq_values( + metadata, + "shard_builder".to_string(), + 3, + [30, 31].into_iter(), + [Some(0.0), Some(1.0)].into_iter(), + 0, + ), + build_key_values_with_ts_seq_values( + metadata, + "shard_builder".to_string(), + 1, + [10, 11].into_iter(), + [Some(0.0), Some(1.0)].into_iter(), + 1, + ), + build_key_values_with_ts_seq_values( + metadata, + "shard_builder".to_string(), + 2, + [20, 21].into_iter(), + [Some(0.0), Some(1.0)].into_iter(), + 2, + ), + ] + } + + fn new_shard_builder( + shard_id: ShardId, + metadata: RegionMetadataRef, + input: &[KeyValues], + ) -> Shard { + let mut dict_builder = KeyDictBuilder::new(1024); + let mut metrics = WriteMetrics::default(); + let mut keys = Vec::with_capacity(input.len()); + for kvs in input { + encode_keys(&metadata, kvs, &mut keys); + } + for key in &keys { + dict_builder.insert_key(key, &mut metrics); + } + + let dict = dict_builder.finish().unwrap(); + let data_parts = DataParts::new(metadata, DATA_INIT_CAP); + + Shard::new(shard_id, Some(Arc::new(dict)), data_parts) + } + + #[test] + fn test_write_shard_builder() { + let metadata = metadata_for_test(); + let input = input_with_key(&metadata); + let config = MergeTreeConfig::default(); + let mut shard_builder = ShardBuilder::new(metadata.clone(), &config); + let mut metrics = WriteMetrics::default(); + assert!(shard_builder.finish(1, metadata.clone()).unwrap().is_none()); + + for key_values in &input { + for kv in key_values.iter() { + let key = encode_key_by_kv(&kv); + shard_builder.write_with_key(&key, kv, &mut metrics); + } + } + shard_builder.finish(1, metadata).unwrap().unwrap(); + } +} diff --git a/src/mito2/src/memtable/merge_tree/tree.rs b/src/mito2/src/memtable/merge_tree/tree.rs index d9c26611f362..4ae7d197b2e7 100644 --- a/src/mito2/src/memtable/merge_tree/tree.rs +++ b/src/mito2/src/memtable/merge_tree/tree.rs @@ -103,7 +103,7 @@ impl MergeTree { if !has_pk { // No primary key. - self.write_no_key(kv, metrics)?; + self.write_no_key(kv); continue; } @@ -202,7 +202,7 @@ impl MergeTree { } // Only fork partitions that have data. 
- let forked_part = part.fork(&metadata); + let forked_part = part.fork(&metadata, &self.config); forked.insert(*part_key, Arc::new(forked_part)); } @@ -236,11 +236,11 @@ impl MergeTree { partition.write_with_key(primary_key, key_value, metrics) } - fn write_no_key(&self, key_value: KeyValue, metrics: &mut WriteMetrics) -> Result<()> { + fn write_no_key(&self, key_value: KeyValue) { let partition_key = Partition::get_partition_key(&key_value, self.is_partitioned); let partition = self.get_or_create_partition(partition_key); - partition.write_no_key(key_value, metrics) + partition.write_no_key(key_value) } fn get_or_create_partition(&self, partition_key: PartitionKey) -> PartitionRef { diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs index 7e761cad771a..584f21350719 100644 --- a/src/mito2/src/test_util/memtable_util.rs +++ b/src/mito2/src/test_util/memtable_util.rs @@ -22,15 +22,18 @@ use api::v1::value::ValueData; use api::v1::{Row, Rows, SemanticType}; use datatypes::data_type::ConcreteDataType; use datatypes::schema::ColumnSchema; +use datatypes::value::ValueRef; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder, RegionMetadataRef}; use store_api::storage::{ColumnId, RegionId, SequenceNumber}; use table::predicate::Predicate; use crate::error::Result; +use crate::memtable::key_values::KeyValue; use crate::memtable::{ BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRef, MemtableStats, }; +use crate::row_converter::{McmpRowCodec, RowCodec, SortField}; /// Empty memtable for test. #[derive(Debug, Default)] @@ -93,14 +96,19 @@ impl MemtableBuilder for EmptyMemtableBuilder { /// /// The schema is `k0, k1, ts, v0, v1` and pk is `k0, k1`. pub(crate) fn metadata_for_test() -> RegionMetadataRef { - metadata_with_primary_key(vec![0, 1]) + metadata_with_primary_key(vec![0, 1], false) } /// Creates a region metadata to test memtable and specific primary key. /// -/// The schema is `k0, k1, ts, v0, v1`. -pub(crate) fn metadata_with_primary_key(primary_key: Vec<ColumnId>) -> RegionMetadataRef { +/// If `enable_table_id` is false, the schema is `k0, k1, ts, v0, v1`. +/// If `enable_table_id` is true, the schema is `k0, __table_id, ts, v0, v1`. +pub(crate) fn metadata_with_primary_key( + primary_key: Vec<ColumnId>, + enable_table_id: bool, +) -> RegionMetadataRef { let mut builder = RegionMetadataBuilder::new(RegionId::new(123, 456)); + let maybe_table_id = if enable_table_id { "table_id" } else { "k1" }; builder .push_column_metadata(ColumnMetadata { column_schema: ColumnSchema::new("k0", ConcreteDataType::string_datatype(), false), @@ -108,7 +116,11 @@ pub(crate) fn metadata_with_primary_key(primary_key: Vec<ColumnId>) -> RegionMet column_id: 0, }) .push_column_metadata(ColumnMetadata { - column_schema: ColumnSchema::new("k1", ConcreteDataType::int64_datatype(), false), + column_schema: ColumnSchema::new( + maybe_table_id, + ConcreteDataType::uint32_datatype(), + false, + ), semantic_type: semantic_type_of_column(1, &primary_key), column_id: 1, }) @@ -144,11 +156,31 @@ fn semantic_type_of_column(column_id: ColumnId, primary_key: &[ColumnId]) -> Sem } } +/// Builds key values with `len` rows for test. 
+pub(crate) fn build_key_values( + schema: &RegionMetadataRef, + k0: String, + k1: u32, + timestamps: &[i64], + sequence: SequenceNumber, +) -> KeyValues { + let values = timestamps.iter().map(|v| Some(*v as f64)); + + build_key_values_with_ts_seq_values( + schema, + k0, + k1, + timestamps.iter().copied(), + values, + sequence, + ) +} + /// Builds key values with timestamps (ms) and sequences for test. pub(crate) fn build_key_values_with_ts_seq_values( schema: &RegionMetadataRef, k0: String, - k1: i64, + k1: u32, timestamps: impl Iterator<Item = i64>, values: impl Iterator<Item = Option<f64>>, sequence: SequenceNumber, @@ -174,7 +206,7 @@ pub(crate) fn build_key_values_with_ts_seq_values( value_data: Some(ValueData::StringValue(k0.clone())), }, api::v1::Value { - value_data: Some(ValueData::I64Value(k1)), + value_data: Some(ValueData::U32Value(k1)), }, api::v1::Value { value_data: Some(ValueData::TimestampMillisecondValue(ts)), @@ -198,3 +230,40 @@ pub(crate) fn build_key_values_with_ts_seq_values( }; KeyValues::new(schema.as_ref(), mutation).unwrap() } + +/// Encode keys. +pub(crate) fn encode_keys( + metadata: &RegionMetadataRef, + key_values: &KeyValues, + keys: &mut Vec<Vec<u8>>, +) { + let row_codec = McmpRowCodec::new( + metadata + .primary_key_columns() + .map(|c| SortField::new(c.column_schema.data_type.clone())) + .collect(), + ); + for kv in key_values.iter() { + let key = row_codec.encode(kv.primary_keys()).unwrap(); + keys.push(key); + } +} + +/// Encode one key. +pub(crate) fn encode_key(k0: &str, k1: u32) -> Vec<u8> { + let row_codec = McmpRowCodec::new(vec![ + SortField::new(ConcreteDataType::string_datatype()), + SortField::new(ConcreteDataType::uint32_datatype()), + ]); + let key = [ValueRef::String(k0), ValueRef::UInt32(k1)]; + row_codec.encode(key.into_iter()).unwrap() +} + +/// Encode one key. +pub(crate) fn encode_key_by_kv(key_value: &KeyValue) -> Vec<u8> { + let row_codec = McmpRowCodec::new(vec![ + SortField::new(ConcreteDataType::string_datatype()), + SortField::new(ConcreteDataType::uint32_datatype()), + ]); + row_codec.encode(key_value.primary_keys()).unwrap() +}
feat
Implement write and fork for the new memtable (#3357)
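The memtable commit above replaces `try_insert_key` with an explicit `is_full()` check followed by an infallible `insert_key`, and sorts frozen data by weights derived from key order. Below is a toy sketch of that dictionary pattern, assuming a simplified `ToyDictBuilder` (illustrative only; the real `KeyDictBuilder` also maintains dictionary blocks and write metrics):

use std::collections::BTreeMap;

type PkIndex = u16;

/// Toy key dictionary builder: assigns a PkIndex per distinct key, bounded capacity.
struct ToyDictBuilder {
    capacity: usize,
    pk_to_index: BTreeMap<Vec<u8>, PkIndex>,
}

impl ToyDictBuilder {
    fn new(capacity: usize) -> Self {
        Self { capacity, pk_to_index: BTreeMap::new() }
    }

    /// True once the builder holds `capacity` distinct keys.
    fn is_full(&self) -> bool {
        self.pk_to_index.len() >= self.capacity
    }

    /// Inserts a key and returns its index. Like the diff's `insert_key`,
    /// callers are expected to check `is_full()` before calling.
    fn insert_key(&mut self, key: &[u8]) -> PkIndex {
        assert!(!self.is_full());
        if let Some(index) = self.pk_to_index.get(key) {
            return *index;
        }
        let index = self.pk_to_index.len() as PkIndex;
        self.pk_to_index.insert(key.to_vec(), index);
        index
    }

    /// Weights that sort rows by key order: weights[pk_index] = rank of the key,
    /// analogous to `pk_weights_to_sort_data` in the diff.
    fn pk_weights(&self) -> Vec<u16> {
        let mut weights = vec![0; self.pk_to_index.len()];
        for (rank, index) in self.pk_to_index.values().enumerate() {
            weights[*index as usize] = rank as u16;
        }
        weights
    }
}

fn main() {
    let mut builder = ToyDictBuilder::new(4);
    // Keys arrive unsorted; indices follow insertion order.
    let b = builder.insert_key(b"b");
    let a = builder.insert_key(b"a");
    let c = builder.insert_key(b"c");
    assert_eq!((b, a, c), (0, 1, 2));
    // Weights reorder rows by key: "a" < "b" < "c".
    assert_eq!(builder.pk_weights(), vec![1, 0, 2]);
}

The weight vector is what the shard builder feeds into `DataBuffer::freeze` so that rows written in arrival order come out sorted by primary key.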
ec59ce5c9a938da15ed0df6da266ef2885ad1699
2024-08-16 08:59:03
LFC
feat: able to handle concurrent region edit requests (#4569)
false
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs index d7f2ea034d75..62912b8ffb10 100644 --- a/src/mito2/src/engine.rs +++ b/src/mito2/src/engine.rs @@ -33,6 +33,8 @@ mod create_test; #[cfg(test)] mod drop_test; #[cfg(test)] +mod edit_region_test; +#[cfg(test)] mod filter_deleted_test; #[cfg(test)] mod flush_test; @@ -88,7 +90,7 @@ use crate::manifest::action::RegionEdit; use crate::metrics::HANDLE_REQUEST_ELAPSED; use crate::read::scan_region::{ScanParallism, ScanRegion, Scanner}; use crate::region::RegionUsage; -use crate::request::WorkerRequest; +use crate::request::{RegionEditRequest, WorkerRequest}; use crate::wal::entry_distributor::{ build_wal_entry_distributor_and_receivers, DEFAULT_ENTRY_RECEIVER_BUFFER_SIZE, }; @@ -196,11 +198,11 @@ impl MitoEngine { ); let (tx, rx) = oneshot::channel(); - let request = WorkerRequest::EditRegion { + let request = WorkerRequest::EditRegion(RegionEditRequest { region_id, edit, tx, - }; + }); self.inner .workers .submit_to_worker(region_id, request) diff --git a/src/mito2/src/engine/edit_region_test.rs b/src/mito2/src/engine/edit_region_test.rs new file mode 100644 index 000000000000..8dd682a37269 --- /dev/null +++ b/src/mito2/src/engine/edit_region_test.rs @@ -0,0 +1,120 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use object_store::ObjectStore; +use store_api::region_engine::RegionEngine; +use store_api::region_request::RegionRequest; +use store_api::storage::RegionId; +use tokio::sync::Barrier; + +use crate::config::MitoConfig; +use crate::engine::MitoEngine; +use crate::manifest::action::RegionEdit; +use crate::region::MitoRegionRef; +use crate::sst::file::{FileId, FileMeta}; +use crate::test_util::{CreateRequestBuilder, TestEnv}; + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_edit_region_concurrently() { + const EDITS_PER_TASK: usize = 10; + let tasks_count = 10; + + // A task that creates SST files and edits the region with them. 
+ struct Task { + region: MitoRegionRef, + ssts: Vec<FileMeta>, + } + + impl Task { + async fn create_ssts(&mut self, object_store: &ObjectStore) { + for _ in 0..EDITS_PER_TASK { + let file = FileMeta { + region_id: self.region.region_id, + file_id: FileId::random(), + level: 0, + ..Default::default() + }; + object_store + .write( + &format!("{}/{}.parquet", self.region.region_dir(), file.file_id), + b"x".as_slice(), + ) + .await + .unwrap(); + self.ssts.push(file); + } + } + + async fn edit_region(self, engine: MitoEngine) { + for sst in self.ssts { + let edit = RegionEdit { + files_to_add: vec![sst], + files_to_remove: vec![], + compaction_time_window: None, + flushed_entry_id: None, + flushed_sequence: None, + }; + engine + .edit_region(self.region.region_id, edit) + .await + .unwrap(); + } + } + } + + let mut env = TestEnv::new(); + let engine = env.create_engine(MitoConfig::default()).await; + + let region_id = RegionId::new(1, 1); + engine + .handle_request( + region_id, + RegionRequest::Create(CreateRequestBuilder::new().build()), + ) + .await + .unwrap(); + let region = engine.get_region(region_id).unwrap(); + + let mut tasks = Vec::with_capacity(tasks_count); + let object_store = env.get_object_store().unwrap(); + for _ in 0..tasks_count { + let mut task = Task { + region: region.clone(), + ssts: Vec::new(), + }; + task.create_ssts(&object_store).await; + tasks.push(task); + } + + let mut futures = Vec::with_capacity(tasks_count); + let barrier = Arc::new(Barrier::new(tasks_count)); + for task in tasks { + futures.push(tokio::spawn({ + let barrier = barrier.clone(); + let engine = engine.clone(); + async move { + barrier.wait().await; + task.edit_region(engine).await; + } + })); + } + futures::future::join_all(futures).await; + + assert_eq!( + region.version().ssts.levels()[0].files.len(), + tasks_count * EDITS_PER_TASK + ); +} diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs index 140966ac336f..2e5826df4dc6 100644 --- a/src/mito2/src/error.rs +++ b/src/mito2/src/error.rs @@ -842,6 +842,13 @@ pub enum Error { #[snafu(implicit)] location: Location, }, + + #[snafu(display("Region {} is busy", region_id))] + RegionBusy { + region_id: RegionId, + #[snafu(implicit)] + location: Location, + }, } pub type Result<T, E = Error> = std::result::Result<T, E>; @@ -973,6 +980,7 @@ impl ErrorExt for Error { | FulltextFinish { source, .. } | ApplyFulltextIndex { source, .. } => source.status_code(), DecodeStats { .. } | StatsNotPresent { .. } => StatusCode::Internal, + RegionBusy { .. } => StatusCode::RegionBusy, } } diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs index 780c85b1d1e0..d88bc994e97e 100644 --- a/src/mito2/src/request.rs +++ b/src/mito2/src/request.rs @@ -494,11 +494,7 @@ pub(crate) enum WorkerRequest { Stop, /// Use [RegionEdit] to edit a region directly. - EditRegion { - region_id: RegionId, - edit: RegionEdit, - tx: Sender<Result<()>>, - }, + EditRegion(RegionEditRequest), } impl WorkerRequest { @@ -762,6 +758,15 @@ pub(crate) struct RegionChangeResult { pub(crate) result: Result<()>, } +/// Request to edit a region directly. +#[derive(Debug)] +pub(crate) struct RegionEditRequest { + pub(crate) region_id: RegionId, + pub(crate) edit: RegionEdit, + /// The sender to notify the result to the region engine. + pub(crate) tx: Sender<Result<()>>, +} + /// Notifies the regin the result of editing region. 
#[derive(Debug)] pub(crate) struct RegionEditResult { diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs index 82b48bcebb29..3aff7764f082 100644 --- a/src/mito2/src/worker.rs +++ b/src/mito2/src/worker.rs @@ -61,6 +61,7 @@ use crate::sst::index::intermediate::IntermediateManager; use crate::sst::index::puffin_manager::PuffinManagerFactory; use crate::time_provider::{StdTimeProvider, TimeProviderRef}; use crate::wal::Wal; +use crate::worker::handle_manifest::RegionEditQueues; /// Identifier for a worker. pub(crate) type WorkerId = u32; @@ -441,6 +442,7 @@ impl<S: LogStore> WorkerStarter<S> { flush_receiver: self.flush_receiver, stalled_count: WRITE_STALL_TOTAL.with_label_values(&[&id_string]), region_count: REGION_COUNT.with_label_values(&[&id_string]), + region_edit_queues: RegionEditQueues::default(), }; let handle = common_runtime::spawn_global(async move { worker_thread.run().await; @@ -629,6 +631,8 @@ struct RegionWorkerLoop<S> { stalled_count: IntGauge, /// Gauge of regions in the worker. region_count: IntGauge, + /// Queues for region edit requests. + region_edit_queues: RegionEditQueues, } impl<S: LogStore> RegionWorkerLoop<S> { @@ -727,12 +731,8 @@ impl<S: LogStore> RegionWorkerLoop<S> { WorkerRequest::SetReadonlyGracefully { region_id, sender } => { self.set_readonly_gracefully(region_id, sender).await; } - WorkerRequest::EditRegion { - region_id, - edit, - tx, - } => { - self.handle_region_edit(region_id, edit, tx).await; + WorkerRequest::EditRegion(request) => { + self.handle_region_edit(request).await; } // We receive a stop signal, but we still want to process remaining // requests. The worker thread will then check the running flag and @@ -824,7 +824,7 @@ impl<S: LogStore> RegionWorkerLoop<S> { BackgroundNotify::CompactionFailed(req) => self.handle_compaction_failure(req).await, BackgroundNotify::Truncate(req) => self.handle_truncate_result(req).await, BackgroundNotify::RegionChange(req) => self.handle_manifest_region_change_result(req), - BackgroundNotify::RegionEdit(req) => self.handle_region_edit_result(req), + BackgroundNotify::RegionEdit(req) => self.handle_region_edit_result(req).await, } } diff --git a/src/mito2/src/worker/handle_manifest.rs b/src/mito2/src/worker/handle_manifest.rs index e12f139b5b71..4ca2fc9c9fcb 100644 --- a/src/mito2/src/worker/handle_manifest.rs +++ b/src/mito2/src/worker/handle_manifest.rs @@ -16,38 +16,89 @@ //! //! It updates the manifest and applies the changes to the region in background. +use std::collections::{HashMap, VecDeque}; + use common_telemetry::{info, warn}; use snafu::ensure; use store_api::storage::RegionId; -use tokio::sync::oneshot::Sender; -use crate::error::{InvalidRequestSnafu, RegionNotFoundSnafu, Result}; +use crate::error::{InvalidRequestSnafu, RegionBusySnafu, RegionNotFoundSnafu, Result}; use crate::manifest::action::{ RegionChange, RegionEdit, RegionMetaAction, RegionMetaActionList, RegionTruncate, }; use crate::region::{MitoRegionRef, RegionState}; use crate::request::{ - BackgroundNotify, OptionOutputTx, RegionChangeResult, RegionEditResult, TruncateResult, - WorkerRequest, + BackgroundNotify, OptionOutputTx, RegionChangeResult, RegionEditRequest, RegionEditResult, + TruncateResult, WorkerRequest, }; use crate::worker::RegionWorkerLoop; +pub(crate) type RegionEditQueues = HashMap<RegionId, RegionEditQueue>; + +/// A queue for temporary store region edit requests, if the region is in the "Editing" state. 
+/// When the current region edit request is completed, the next (if there exists) request in the +/// queue will be processed. +/// Everything is done in the region worker loop. +pub(crate) struct RegionEditQueue { + region_id: RegionId, + requests: VecDeque<RegionEditRequest>, +} + +impl RegionEditQueue { + const QUEUE_MAX_LEN: usize = 128; + + fn new(region_id: RegionId) -> Self { + Self { + region_id, + requests: VecDeque::new(), + } + } + + fn enqueue(&mut self, request: RegionEditRequest) { + if self.requests.len() > Self::QUEUE_MAX_LEN { + let _ = request.tx.send( + RegionBusySnafu { + region_id: self.region_id, + } + .fail(), + ); + return; + }; + self.requests.push_back(request); + } + + fn dequeue(&mut self) -> Option<RegionEditRequest> { + self.requests.pop_front() + } +} + impl<S> RegionWorkerLoop<S> { /// Handles region edit request. - pub(crate) async fn handle_region_edit( - &self, - region_id: RegionId, - edit: RegionEdit, - sender: Sender<Result<()>>, - ) { - let region = match self.regions.writable_region(region_id) { - Ok(region) => region, - Err(e) => { - let _ = sender.send(Err(e)); - return; - } + pub(crate) async fn handle_region_edit(&mut self, request: RegionEditRequest) { + let region_id = request.region_id; + let Some(region) = self.regions.get_region(region_id) else { + let _ = request.tx.send(RegionNotFoundSnafu { region_id }.fail()); + return; }; + if !region.is_writable() { + if region.state() == RegionState::Editing { + self.region_edit_queues + .entry(region_id) + .or_insert_with(|| RegionEditQueue::new(region_id)) + .enqueue(request); + } else { + let _ = request.tx.send(RegionBusySnafu { region_id }.fail()); + } + return; + } + + let RegionEditRequest { + region_id: _, + edit, + tx: sender, + } = request; + // Marks the region as editing. if let Err(e) = region.set_editing() { let _ = sender.send(Err(e)); @@ -79,7 +130,7 @@ impl<S> RegionWorkerLoop<S> { } /// Handles region edit result. - pub(crate) fn handle_region_edit_result(&self, edit_result: RegionEditResult) { + pub(crate) async fn handle_region_edit_result(&mut self, edit_result: RegionEditResult) { let region = match self.regions.get_region(edit_result.region_id) { Some(region) => region, None => { @@ -104,6 +155,12 @@ impl<S> RegionWorkerLoop<S> { region.switch_state_to_writable(RegionState::Editing); let _ = edit_result.sender.send(edit_result.result); + + if let Some(edit_queue) = self.region_edit_queues.get_mut(&edit_result.region_id) { + if let Some(request) = edit_queue.dequeue() { + self.handle_region_edit(request).await; + } + } } /// Writes truncate action to the manifest and then applies it to the region in background.
feat
able to handle concurrent region edit requests (#4569)
c8cde704cf353bcde47c97937fdcbe9229d52438
2023-08-15 16:19:22
shuiyisong
chore: minor `auth` crate change (#2176)
false
diff --git a/src/auth/src/common.rs b/src/auth/src/common.rs index 8278ae53dc0b..d49f4883b7b8 100644 --- a/src/auth/src/common.rs +++ b/src/auth/src/common.rs @@ -14,10 +14,12 @@ use std::sync::Arc; +use digest::Digest; use secrecy::SecretString; -use snafu::OptionExt; +use sha1::Sha1; +use snafu::{ensure, OptionExt}; -use crate::error::{InvalidConfigSnafu, Result}; +use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu}; use crate::user_info::DefaultUserInfo; use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER}; use crate::{UserInfoRef, UserProviderRef}; @@ -66,3 +68,80 @@ pub enum Password<'a> { MysqlNativePassword(HashedPassword<'a>, Salt<'a>), PgMD5(HashedPassword<'a>, Salt<'a>), } + +pub fn auth_mysql( + auth_data: HashedPassword, + salt: Salt, + username: &str, + save_pwd: &[u8], +) -> Result<()> { + ensure!( + auth_data.len() == 20, + IllegalParamSnafu { + msg: "Illegal mysql password length" + } + ); + // ref: https://github.com/mysql/mysql-server/blob/a246bad76b9271cb4333634e954040a970222e0a/sql/auth/password.cc#L62 + let hash_stage_2 = double_sha1(save_pwd); + let tmp = sha1_two(salt, &hash_stage_2); + // xor auth_data and tmp + let mut xor_result = [0u8; 20]; + for i in 0..20 { + xor_result[i] = auth_data[i] ^ tmp[i]; + } + let candidate_stage_2 = sha1_one(&xor_result); + if candidate_stage_2 == hash_stage_2 { + Ok(()) + } else { + UserPasswordMismatchSnafu { + username: username.to_string(), + } + .fail() + } +} + +fn sha1_two(input_1: &[u8], input_2: &[u8]) -> Vec<u8> { + let mut hasher = Sha1::new(); + hasher.update(input_1); + hasher.update(input_2); + hasher.finalize().to_vec() +} + +fn sha1_one(data: &[u8]) -> Vec<u8> { + let mut hasher = Sha1::new(); + hasher.update(data); + hasher.finalize().to_vec() +} + +fn double_sha1(data: &[u8]) -> Vec<u8> { + sha1_one(&sha1_one(data)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sha() { + let sha_1_answer: Vec<u8> = vec![ + 124, 74, 141, 9, 202, 55, 98, 175, 97, 229, 149, 32, 148, 61, 194, 100, 148, 248, 148, + 27, + ]; + let sha_1 = sha1_one("123456".as_bytes()); + assert_eq!(sha_1, sha_1_answer); + + let double_sha1_answer: Vec<u8> = vec![ + 107, 180, 131, 126, 183, 67, 41, 16, 94, 228, 86, 141, 218, 125, 198, 126, 210, 202, + 42, 217, + ]; + let double_sha1 = double_sha1("123456".as_bytes()); + assert_eq!(double_sha1, double_sha1_answer); + + let sha1_2_answer: Vec<u8> = vec![ + 132, 115, 215, 211, 99, 186, 164, 206, 168, 152, 217, 192, 117, 47, 240, 252, 142, 244, + 37, 204, + ]; + let sha1_2 = sha1_two("123456".as_bytes(), "654321".as_bytes()); + assert_eq!(sha1_2, sha1_2_answer); + } +} diff --git a/src/auth/src/error.rs b/src/auth/src/error.rs index 7acfb5726a33..f14ffbb19033 100644 --- a/src/auth/src/error.rs +++ b/src/auth/src/error.rs @@ -60,6 +60,9 @@ pub enum Error { schema: String, username: String, }, + + #[snafu(display("User is not authorized to perform this action"))] + PermissionDenied { location: Location }, } impl ErrorExt for Error { @@ -75,6 +78,7 @@ impl ErrorExt for Error { Error::UnsupportedPasswordType { .. } => StatusCode::UnsupportedPasswordType, Error::UserPasswordMismatch { .. } => StatusCode::UserPasswordMismatch, Error::AccessDenied { .. } => StatusCode::AccessDenied, + Error::PermissionDenied { .. 
} => StatusCode::PermissionDenied, } } diff --git a/src/auth/src/lib.rs b/src/auth/src/lib.rs index 8fa919b2a2b2..dadbdee02ce7 100644 --- a/src/auth/src/lib.rs +++ b/src/auth/src/lib.rs @@ -13,7 +13,7 @@ // limitations under the License. mod common; -mod error; +pub mod error; mod permission; mod user_info; mod user_provider; @@ -21,8 +21,9 @@ mod user_provider; #[cfg(feature = "testing")] pub mod tests; -pub use common::{user_provider_from_option, userinfo_by_name, HashedPassword, Identity, Password}; -pub use error::{Error, Result}; +pub use common::{ + auth_mysql, user_provider_from_option, userinfo_by_name, HashedPassword, Identity, Password, +}; pub use permission::{PermissionChecker, PermissionReq, PermissionResp}; pub use user_info::UserInfo; pub use user_provider::UserProvider; diff --git a/src/auth/src/permission.rs b/src/auth/src/permission.rs index 64734a3c5ae9..5ebc0ab20b94 100644 --- a/src/auth/src/permission.rs +++ b/src/auth/src/permission.rs @@ -17,7 +17,7 @@ use std::fmt::Debug; use api::v1::greptime_request::Request; use sql::statements::statement::Statement; -use crate::error::Result; +use crate::error::{PermissionDeniedSnafu, Result}; use crate::{PermissionCheckerRef, UserInfoRef}; #[derive(Debug, Clone)] @@ -53,7 +53,11 @@ impl PermissionChecker for Option<&PermissionCheckerRef> { req: PermissionReq, ) -> Result<PermissionResp> { match self { - Some(checker) => checker.check_permission(user_info, req), + Some(checker) => match checker.check_permission(user_info, req) { + Ok(PermissionResp::Reject) => PermissionDeniedSnafu.fail(), + Ok(PermissionResp::Allow) => Ok(PermissionResp::Allow), + Err(e) => Err(e), + }, None => Ok(PermissionResp::Allow), } } diff --git a/src/auth/src/tests.rs b/src/auth/src/tests.rs index 7cc1b5c7e45b..92574541a158 100644 --- a/src/auth/src/tests.rs +++ b/src/auth/src/tests.rs @@ -18,10 +18,7 @@ use crate::error::{ UserPasswordMismatchSnafu, }; use crate::user_info::DefaultUserInfo; -use crate::user_provider::static_user_provider::auth_mysql; -#[allow(unused_imports)] -use crate::Error; -use crate::{Identity, Password, UserInfoRef, UserProvider}; +use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider}; pub struct DatabaseAuthInfo<'a> { pub catalog: &'a str, @@ -108,6 +105,8 @@ impl UserProvider for MockUserProvider { #[tokio::test] async fn test_auth_by_plain_text() { + use crate::error; + let user_provider = MockUserProvider::default(); assert_eq!("mock_user_provider", user_provider.name()); @@ -131,7 +130,7 @@ async fn test_auth_by_plain_text() { assert!(auth_result.is_err()); assert!(matches!( auth_result.err().unwrap(), - Error::UnsupportedPasswordType { .. } + error::Error::UnsupportedPasswordType { .. } )); // auth failed, err: user not exist. @@ -144,7 +143,7 @@ async fn test_auth_by_plain_text() { assert!(auth_result.is_err()); assert!(matches!( auth_result.err().unwrap(), - Error::UserNotFound { .. } + error::Error::UserNotFound { .. } )); // auth failed, err: wrong password @@ -157,7 +156,7 @@ async fn test_auth_by_plain_text() { assert!(auth_result.is_err()); assert!(matches!( auth_result.err().unwrap(), - Error::UserPasswordMismatch { .. } + error::Error::UserPasswordMismatch { .. 
} )) } diff --git a/src/auth/src/user_provider/static_user_provider.rs b/src/auth/src/user_provider/static_user_provider.rs index 5510b34fe06c..591a116b9281 100644 --- a/src/auth/src/user_provider/static_user_provider.rs +++ b/src/auth/src/user_provider/static_user_provider.rs @@ -19,18 +19,15 @@ use std::io::BufRead; use std::path::Path; use async_trait::async_trait; -use digest::Digest; use secrecy::ExposeSecret; -use sha1::Sha1; use snafu::{ensure, OptionExt, ResultExt}; -use crate::common::Salt; use crate::error::{ Error, IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu, UserPasswordMismatchSnafu, }; use crate::user_info::DefaultUserInfo; -use crate::{HashedPassword, Identity, Password, UserInfoRef, UserProvider}; +use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider}; pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider"; @@ -136,12 +133,6 @@ impl UserProvider for StaticUserProvider { }; } Password::MysqlNativePassword(auth_data, salt) => { - ensure!( - auth_data.len() == 20, - IllegalParamSnafu { - msg: "Illegal MySQL native password format, length != 20" - } - ); auth_mysql(auth_data, salt, username, save_pwd) .map(|_| DefaultUserInfo::with_name(username)) } @@ -165,48 +156,6 @@ impl UserProvider for StaticUserProvider { } } -pub fn auth_mysql( - auth_data: HashedPassword, - salt: Salt, - username: &str, - save_pwd: &[u8], -) -> Result<()> { - // ref: https://github.com/mysql/mysql-server/blob/a246bad76b9271cb4333634e954040a970222e0a/sql/auth/password.cc#L62 - let hash_stage_2 = double_sha1(save_pwd); - let tmp = sha1_two(salt, &hash_stage_2); - // xor auth_data and tmp - let mut xor_result = [0u8; 20]; - for i in 0..20 { - xor_result[i] = auth_data[i] ^ tmp[i]; - } - let candidate_stage_2 = sha1_one(&xor_result); - if candidate_stage_2 == hash_stage_2 { - Ok(()) - } else { - UserPasswordMismatchSnafu { - username: username.to_string(), - } - .fail() - } -} - -fn sha1_two(input_1: &[u8], input_2: &[u8]) -> Vec<u8> { - let mut hasher = Sha1::new(); - hasher.update(input_1); - hasher.update(input_2); - hasher.finalize().to_vec() -} - -fn sha1_one(data: &[u8]) -> Vec<u8> { - let mut hasher = Sha1::new(); - hasher.update(data); - hasher.finalize().to_vec() -} - -fn double_sha1(data: &[u8]) -> Vec<u8> { - sha1_one(&sha1_one(data)) -} - #[cfg(test)] pub mod test { use std::fs::File; @@ -215,36 +164,10 @@ pub mod test { use common_test_util::temp_dir::create_temp_dir; use crate::user_info::DefaultUserInfo; - use crate::user_provider::static_user_provider::{ - double_sha1, sha1_one, sha1_two, StaticUserProvider, - }; + use crate::user_provider::static_user_provider::StaticUserProvider; use crate::user_provider::{Identity, Password}; use crate::UserProvider; - #[test] - fn test_sha() { - let sha_1_answer: Vec<u8> = vec![ - 124, 74, 141, 9, 202, 55, 98, 175, 97, 229, 149, 32, 148, 61, 194, 100, 148, 248, 148, - 27, - ]; - let sha_1 = sha1_one("123456".as_bytes()); - assert_eq!(sha_1, sha_1_answer); - - let double_sha1_answer: Vec<u8> = vec![ - 107, 180, 131, 126, 183, 67, 41, 16, 94, 228, 86, 141, 218, 125, 198, 126, 210, 202, - 42, 217, - ]; - let double_sha1 = double_sha1("123456".as_bytes()); - assert_eq!(double_sha1, double_sha1_answer); - - let sha1_2_answer: Vec<u8> = vec![ - 132, 115, 215, 211, 99, 186, 164, 206, 168, 152, 217, 192, 117, 47, 240, 252, 142, 244, - 37, 204, - ]; - let sha1_2 = sha1_two("123456".as_bytes(), "654321".as_bytes()); - assert_eq!(sha1_2, sha1_2_answer); - } - async fn 
test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) { let re = provider .authenticate( diff --git a/src/auth/tests/mod.rs b/src/auth/tests/mod.rs index 3cc0cbe2e7d6..d89b8925d14c 100644 --- a/src/auth/tests/mod.rs +++ b/src/auth/tests/mod.rs @@ -17,7 +17,7 @@ use std::assert_matches::assert_matches; use std::sync::Arc; use api::v1::greptime_request::Request; -use auth::Error::InternalState; +use auth::error::Error::InternalState; use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq, PermissionResp, UserInfoRef}; use sql::statements::show::{ShowDatabases, ShowKind}; use sql::statements::statement::Statement; @@ -29,7 +29,7 @@ impl PermissionChecker for DummyPermissionChecker { &self, _user_info: Option<UserInfoRef>, req: PermissionReq, - ) -> auth::Result<PermissionResp> { + ) -> auth::error::Result<PermissionResp> { match req { PermissionReq::GrpcRequest(_) => Ok(PermissionResp::Allow), PermissionReq::SqlStatement(_) => Ok(PermissionResp::Reject), diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs index 3fe37a96f462..3c55361e40b1 100644 --- a/src/cmd/src/error.rs +++ b/src/cmd/src/error.rs @@ -80,7 +80,7 @@ pub enum Error { #[snafu(display("Illegal auth config: {}", source))] IllegalAuthConfig { location: Location, - source: auth::Error, + source: auth::error::Error, }, #[snafu(display("Unsupported selector type, {} source: {}", selector_type, source))] diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs index 81f8daa15751..92ea598d04b5 100644 --- a/src/common/error/src/status_code.rs +++ b/src/common/error/src/status_code.rs @@ -84,6 +84,8 @@ pub enum StatusCode { InvalidAuthHeader = 7004, /// Illegal request to connect catalog-schema AccessDenied = 7005, + /// User is not authorized to perform the operation + PermissionDenied = 7006, // ====== End of auth related status code ===== } @@ -120,7 +122,8 @@ impl StatusCode { | StatusCode::UserPasswordMismatch | StatusCode::AuthHeaderNotFound | StatusCode::InvalidAuthHeader - | StatusCode::AccessDenied => false, + | StatusCode::AccessDenied + | StatusCode::PermissionDenied => false, } } @@ -151,7 +154,8 @@ impl StatusCode { | StatusCode::UserPasswordMismatch | StatusCode::AuthHeaderNotFound | StatusCode::InvalidAuthHeader - | StatusCode::AccessDenied => false, + | StatusCode::AccessDenied + | StatusCode::PermissionDenied => false, } } diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs index e5c4d03c1a9c..07c393db32be 100644 --- a/src/frontend/src/error.rs +++ b/src/frontend/src/error.rs @@ -593,7 +593,7 @@ pub enum Error { #[snafu(display("Failed to pass permission check, source: {}", source))] Permission { - source: auth::Error, + source: auth::error::Error, location: Location, }, } diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs index 116d6b353a88..ed930a092738 100644 --- a/src/servers/src/error.rs +++ b/src/servers/src/error.rs @@ -200,7 +200,7 @@ pub enum Error { #[snafu(display("Failed to get user info, source: {}", source))] Auth { location: Location, - source: auth::Error, + source: auth::error::Error, }, #[snafu(display("Not found http or grpc authorization header"))] @@ -451,7 +451,7 @@ fn status_to_tonic_code(status_code: StatusCode) -> Code { | StatusCode::UserPasswordMismatch | StatusCode::AuthHeaderNotFound | StatusCode::InvalidAuthHeader => Code::Unauthenticated, - StatusCode::AccessDenied => Code::PermissionDenied, + StatusCode::AccessDenied | StatusCode::PermissionDenied => Code::PermissionDenied, } } diff --git 
a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs index 61668a8e01f3..0deebc02bb30 100644 --- a/src/servers/src/mysql/handler.rs +++ b/src/servers/src/mysql/handler.rs @@ -186,7 +186,8 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi } }; } - let user_info = user_info.unwrap_or_else(|| auth::userinfo_by_name(None)); + let user_info = + user_info.unwrap_or_else(|| auth::userinfo_by_name(Some(username.to_string()))); self.session.set_user_info(user_info);
chore
minor `auth` crate change (#2176)
b0cbfa7ffb6881413353d8eb28e1db7c61d72951
2022-11-30 18:55:27
xiaomin tang
docs: add a roadmap link in README (#673)
false
diff --git a/README.md b/README.md index 6f4f3398bcb6..c54ba3520248 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,6 @@ To compile GreptimeDB from source, you'll need: find an installation instructions [here](https://grpc.io/docs/protoc-installation/). **Note that `protoc` version needs to be >= 3.15** because we have used the `optional` keyword. You can check it with `protoc --version`. - #### Build with Docker @@ -161,6 +160,8 @@ break things. Benchmark on development branch may not represent its potential performance. We release pre-built binaries constantly for functional evaluation. Do not use it in production at the moment. +For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669). + ## Community Our core team is thrilled too see you participate in any ways you like. When you are stuck, try to
docs
add a roadmap link in README (#673)
bf8c7170222ab873d0f0503bdb15d05b3acb85c2
2023-04-04 19:06:54
dennis zhuang
feat: try to do manifest checkpoint on opening region (#1321)
false
diff --git a/config/datanode.example.toml b/config/datanode.example.toml index cab3f174285c..ba1d04a1ac3e 100644 --- a/config/datanode.example.toml +++ b/config/datanode.example.toml @@ -49,6 +49,8 @@ max_purge_tasks = 32 checkpoint_margin = 10 # Region manifest logs and checkpoints gc execution duration gc_duration = '30s' +# Whether to try creating a manifest checkpoint on region opening +checkpoint_on_startup = false # Procedure storage options, see `standalone.example.toml`. # [procedure.store] diff --git a/config/standalone.example.toml b/config/standalone.example.toml index e06ca04a047f..35531b3b340e 100644 --- a/config/standalone.example.toml +++ b/config/standalone.example.toml @@ -114,7 +114,8 @@ max_purge_tasks = 32 checkpoint_margin = 10 # Region manifest logs and checkpoints gc execution duration gc_duration = '30s' - +# Whether to try creating a manifest checkpoint on region opening +checkpoint_on_startup = false # Procedure storage options. # Uncomment to enable. diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs index 5aba79f81d86..796bbe75a07d 100644 --- a/src/cmd/src/datanode.rs +++ b/src/cmd/src/datanode.rs @@ -224,6 +224,7 @@ mod tests { [storage.manifest] checkpoint_margin = 9 gc_duration = '7s' + checkpoint_on_startup = true "#; write!(file, "{}", toml_str).unwrap(); @@ -275,6 +276,7 @@ mod tests { RegionManifestConfig { checkpoint_margin: Some(9), gc_duration: Some(Duration::from_secs(7)), + checkpoint_on_startup: true, }, options.storage.manifest, ); diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index 0883f7d4baf8..4b0eb3eff30e 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -129,6 +129,8 @@ pub struct RegionManifestConfig { /// Region manifest logs and checkpoints gc task execution duration. 
#[serde(with = "humantime_serde")] pub gc_duration: Option<Duration>, + /// Whether to try creating a manifest checkpoint on region opening + pub checkpoint_on_startup: bool, } impl Default for RegionManifestConfig { @@ -136,6 +138,7 @@ impl Default for RegionManifestConfig { Self { checkpoint_margin: Some(10u16), gc_duration: Some(Duration::from_secs(30)), + checkpoint_on_startup: false, } } } @@ -176,6 +179,7 @@ impl From<&DatanodeOptions> for SchedulerConfig { impl From<&DatanodeOptions> for StorageEngineConfig { fn from(value: &DatanodeOptions) -> Self { Self { + manifest_checkpoint_on_startup: value.storage.manifest.checkpoint_on_startup, manifest_checkpoint_margin: value.storage.manifest.checkpoint_margin, manifest_gc_duration: value.storage.manifest.gc_duration, max_files_in_l0: value.storage.compaction.max_files_in_level0, diff --git a/src/storage/src/config.rs b/src/storage/src/config.rs index 1e74826d6d4a..bffcc4ffbaff 100644 --- a/src/storage/src/config.rs +++ b/src/storage/src/config.rs @@ -20,6 +20,7 @@ use common_base::readable_size::ReadableSize; #[derive(Debug, Clone)] pub struct EngineConfig { + pub manifest_checkpoint_on_startup: bool, pub manifest_checkpoint_margin: Option<u16>, pub manifest_gc_duration: Option<Duration>, pub max_files_in_l0: usize, @@ -30,6 +31,7 @@ pub struct EngineConfig { impl Default for EngineConfig { fn default() -> Self { Self { + manifest_checkpoint_on_startup: false, manifest_checkpoint_margin: Some(10), manifest_gc_duration: Some(Duration::from_secs(30)), max_files_in_l0: 8, diff --git a/src/storage/src/manifest/impl_.rs b/src/storage/src/manifest/impl_.rs index b320dab5b14c..052965419d0e 100644 --- a/src/storage/src/manifest/impl_.rs +++ b/src/storage/src/manifest/impl_.rs @@ -81,11 +81,15 @@ impl<S: 'static + Checkpoint<Error = Error>, M: 'static + MetaAction<Error = Err Self::new(manifest_dir, object_store, None, None, None) } - pub fn checkpointer(&self) -> &Option<Arc<dyn Checkpointer<Checkpoint = S, MetaAction = M>>> { + #[inline] + pub(crate) fn checkpointer( + &self, + ) -> &Option<Arc<dyn Checkpointer<Checkpoint = S, MetaAction = M>>> { &self.checkpointer } - pub fn set_last_checkpoint_version(&self, version: ManifestVersion) { + #[inline] + pub(crate) fn set_last_checkpoint_version(&self, version: ManifestVersion) { self.last_checkpoint_version .store(version, Ordering::Relaxed); } @@ -95,7 +99,7 @@ impl<S: 'static + Checkpoint<Error = Error>, M: 'static + MetaAction<Error = Err self.inner.update_state(version, protocol); } - pub async fn save_checkpoint(&self, checkpoint: &RegionCheckpoint) -> Result<()> { + pub(crate) async fn save_checkpoint(&self, checkpoint: &RegionCheckpoint) -> Result<()> { ensure!( checkpoint .protocol @@ -111,8 +115,19 @@ impl<S: 'static + Checkpoint<Error = Error>, M: 'static + MetaAction<Error = Err .await } + pub(crate) async fn may_do_checkpoint(&self, version: ManifestVersion) -> Result<()> { + if version - self.last_checkpoint_version.load(Ordering::Relaxed) + >= self.checkpoint_actions_margin as u64 + { + let s = self.do_checkpoint().await?; + debug!("Manifest checkpoint, checkpoint: {:#?}", s); + } + + Ok(()) + } + #[inline] - pub fn manifest_store(&self) -> &Arc<ManifestObjectStore> { + pub(crate) fn manifest_store(&self) -> &Arc<ManifestObjectStore> { self.inner.manifest_store() } } @@ -128,12 +143,8 @@ impl<S: 'static + Checkpoint<Error = Error>, M: 'static + MetaAction<Error = Err async fn update(&self, action_list: M) -> Result<ManifestVersion> { let version = 
self.inner.save(action_list).await?; - if version - self.last_checkpoint_version.load(Ordering::Relaxed) - >= self.checkpoint_actions_margin as u64 - { - let s = self.do_checkpoint().await?; - debug!("Manifest checkpoint, checkpoint: {:#?}", s); - } + + self.may_do_checkpoint(version).await?; Ok(version) } diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs index 38bbb74536c7..f553bbfd03b0 100644 --- a/src/storage/src/region.rs +++ b/src/storage/src/region.rs @@ -328,6 +328,12 @@ impl<S: LogStore> RegionImpl<S> { .replay(recovered_metadata_after_flushed, writer_ctx) .await?; + // Try to do a manifest checkpoint on opening + if store_config.engine_config.manifest_checkpoint_on_startup { + let manifest = &store_config.manifest; + manifest.may_do_checkpoint(manifest.last_version()).await?; + } + let inner = Arc::new(RegionInner { shared, writer,
feat
try to do manifest checkpoint on opening region (#1321)
42bf7e99655bf842a08c657d1d601c0a8a9f41f2
2024-11-14 19:07:02
Lin Yihai
refactor: Avoid wrapping Option for CacheManagerRef (#4996)
false
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs index 822ff4a29e1b..2bea36ad3d0c 100644 --- a/src/mito2/src/cache/write_cache.rs +++ b/src/mito2/src/cache/write_cache.rs @@ -501,7 +501,7 @@ mod tests { // Read metadata from write cache let builder = ParquetReaderBuilder::new(data_home, handle.clone(), mock_store.clone()) - .cache(Some(cache_manager.clone())); + .cache(cache_manager.clone()); let reader = builder.build().await.unwrap(); // Check parquet metadata diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs index 2c2a8f092af8..57337d8f74ae 100644 --- a/src/mito2/src/compaction.rs +++ b/src/mito2/src/compaction.rs @@ -562,7 +562,7 @@ pub struct SerializedCompactionOutput { struct CompactionSstReaderBuilder<'a> { metadata: RegionMetadataRef, sst_layer: AccessLayerRef, - cache: Option<CacheManagerRef>, + cache: CacheManagerRef, inputs: &'a [FileHandle], append_mode: bool, filter_deleted: bool, diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs index bf197690cf3d..3e0228a4b2a4 100644 --- a/src/mito2/src/compaction/compactor.rs +++ b/src/mito2/src/compaction/compactor.rs @@ -295,7 +295,7 @@ impl Compactor for DefaultCompactor { let reader = CompactionSstReaderBuilder { metadata: region_metadata.clone(), sst_layer: sst_layer.clone(), - cache: Some(cache_manager.clone()), + cache: cache_manager.clone(), inputs: &output.inputs, append_mode, filter_deleted: output.filter_deleted, diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs index bf9777efa5f5..c60b7c4107ed 100644 --- a/src/mito2/src/engine.rs +++ b/src/mito2/src/engine.rs @@ -438,16 +438,12 @@ impl EngineInner { channel_size: self.config.parallel_scan_channel_size, }; - let scan_region = ScanRegion::new( - version, - region.access_layer.clone(), - request, - Some(cache_manager), - ) - .with_parallelism(scan_parallelism) - .with_ignore_inverted_index(self.config.inverted_index.apply_on_query.disabled()) - .with_ignore_fulltext_index(self.config.fulltext_index.apply_on_query.disabled()) - .with_start_time(query_start); + let scan_region = + ScanRegion::new(version, region.access_layer.clone(), request, cache_manager) + .with_parallelism(scan_parallelism) + .with_ignore_inverted_index(self.config.inverted_index.apply_on_query.disabled()) + .with_ignore_fulltext_index(self.config.fulltext_index.apply_on_query.disabled()) + .with_start_time(query_start); Ok(scan_region) } diff --git a/src/mito2/src/read/last_row.rs b/src/mito2/src/read/last_row.rs index f40172c21d3b..d97e35ac08b9 100644 --- a/src/mito2/src/read/last_row.rs +++ b/src/mito2/src/read/last_row.rs @@ -85,7 +85,7 @@ impl RowGroupLastRowCachedReader { pub(crate) fn new( file_id: FileId, row_group_idx: usize, - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, row_group_reader: RowGroupReader, ) -> Self { let key = SelectorResultKey { @@ -94,9 +94,6 @@ impl RowGroupLastRowCachedReader { selector: TimeSeriesRowSelector::LastRow, }; - let Some(cache_manager) = cache_manager else { - return Self::new_miss(key, row_group_reader, None); - }; if let Some(value) = cache_manager.get_selector_result(&key) { let schema_matches = value.projection == row_group_reader diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs index 9ba5f6eccf1e..78866f0c1ba0 100644 --- a/src/mito2/src/read/projection.rs +++ b/src/mito2/src/read/projection.rs @@ -171,7 +171,7 @@ impl ProjectionMapper { pub(crate) fn convert( &self, batch: &Batch, - cache_manager: 
Option<&CacheManager>, + cache_manager: &CacheManager, ) -> common_recordbatch::error::Result<RecordBatch> { debug_assert_eq!(self.batch_fields.len(), batch.fields().len()); debug_assert!(self @@ -204,15 +204,12 @@ impl ProjectionMapper { match index { BatchIndex::Tag(idx) => { let value = &pk_values[*idx]; - let vector = match cache_manager { - Some(cache) => repeated_vector_with_cache( - &column_schema.data_type, - value, - num_rows, - cache, - )?, - None => new_repeated_vector(&column_schema.data_type, value, num_rows)?, - }; + let vector = repeated_vector_with_cache( + &column_schema.data_type, + value, + num_rows, + cache_manager, + )?; columns.push(vector); } BatchIndex::Timestamp => { @@ -360,7 +357,7 @@ mod tests { // With vector cache. let cache = CacheManager::builder().vector_cache_size(1024).build(); let batch = new_batch(0, &[1, 2], &[(3, 3), (4, 4)], 3); - let record_batch = mapper.convert(&batch, Some(&cache)).unwrap(); + let record_batch = mapper.convert(&batch, &cache).unwrap(); let expect = "\ +---------------------+----+----+----+----+ | ts | k0 | k1 | v0 | v1 | @@ -380,7 +377,7 @@ mod tests { assert!(cache .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(3)) .is_none()); - let record_batch = mapper.convert(&batch, Some(&cache)).unwrap(); + let record_batch = mapper.convert(&batch, &cache).unwrap(); assert_eq!(expect, print_record_batch(record_batch)); } @@ -401,7 +398,8 @@ mod tests { ); let batch = new_batch(0, &[1, 2], &[(4, 4)], 3); - let record_batch = mapper.convert(&batch, None).unwrap(); + let cache = CacheManager::builder().vector_cache_size(1024).build(); + let record_batch = mapper.convert(&batch, &cache).unwrap(); let expect = "\ +----+----+ | v1 | k0 | diff --git a/src/mito2/src/read/range.rs b/src/mito2/src/read/range.rs index 677b37354d5b..4bf9314915b9 100644 --- a/src/mito2/src/read/range.rs +++ b/src/mito2/src/read/range.rs @@ -90,7 +90,7 @@ impl RangeMeta { Self::push_unordered_file_ranges( input.memtables.len(), &input.files, - input.cache_manager.as_deref(), + &input.cache_manager, &mut ranges, ); @@ -172,16 +172,15 @@ impl RangeMeta { fn push_unordered_file_ranges( num_memtables: usize, files: &[FileHandle], - cache: Option<&CacheManager>, + cache: &CacheManager, ranges: &mut Vec<RangeMeta>, ) { // For append mode, we can parallelize reading row groups. for (i, file) in files.iter().enumerate() { let file_index = num_memtables + i; // Get parquet meta from the cache. - let parquet_meta = cache.and_then(|c| { - c.get_parquet_meta_data_from_mem_cache(file.region_id(), file.file_id()) - }); + let parquet_meta = + cache.get_parquet_meta_data_from_mem_cache(file.region_id(), file.file_id()); if let Some(parquet_meta) = parquet_meta { // Scans each row group. for row_group_index in 0..file.meta_ref().num_row_groups { diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs index 04dadf924486..0241ba72037e 100644 --- a/src/mito2/src/read/scan_region.rs +++ b/src/mito2/src/read/scan_region.rs @@ -167,7 +167,7 @@ pub(crate) struct ScanRegion { /// Scan request. request: ScanRequest, /// Cache. - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, /// Parallelism to scan. parallelism: ScanParallelism, /// Whether to ignore inverted index. 
@@ -184,7 +184,7 @@ impl ScanRegion { version: VersionRef, access_layer: AccessLayerRef, request: ScanRequest, - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, ) -> ScanRegion { ScanRegion { version, @@ -381,17 +381,12 @@ impl ScanRegion { } let file_cache = || -> Option<FileCacheRef> { - let cache_manager = self.cache_manager.as_ref()?; - let write_cache = cache_manager.write_cache()?; + let write_cache = self.cache_manager.write_cache()?; let file_cache = write_cache.file_cache(); Some(file_cache) }(); - let index_cache = self - .cache_manager - .as_ref() - .and_then(|c| c.index_cache()) - .cloned(); + let index_cache = self.cache_manager.index_cache().cloned(); InvertedIndexApplierBuilder::new( self.access_layer.region_dir().to_string(), @@ -471,7 +466,7 @@ pub(crate) struct ScanInput { /// Handles to SST files to scan. pub(crate) files: Vec<FileHandle>, /// Cache. - pub(crate) cache_manager: Option<CacheManagerRef>, + pub(crate) cache_manager: CacheManagerRef, /// Ignores file not found error. ignore_file_not_found: bool, /// Parallelism to scan data. @@ -502,7 +497,7 @@ impl ScanInput { predicate: None, memtables: Vec::new(), files: Vec::new(), - cache_manager: None, + cache_manager: CacheManagerRef::default(), ignore_file_not_found: false, parallelism: ScanParallelism::default(), inverted_index_applier: None, @@ -545,7 +540,7 @@ impl ScanInput { /// Sets cache for this query. #[must_use] - pub(crate) fn with_cache(mut self, cache: Option<CacheManagerRef>) -> Self { + pub(crate) fn with_cache(mut self, cache: CacheManagerRef) -> Self { self.cache_manager = cache; self } diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs index 9b7a71a36c51..345d1d615ba5 100644 --- a/src/mito2/src/read/seq_scan.rs +++ b/src/mito2/src/read/seq_scan.rs @@ -229,7 +229,7 @@ impl SeqScan { .await .map_err(BoxedError::new) .context(ExternalSnafu)?; - let cache = stream_ctx.input.cache_manager.as_deref(); + let cache = &stream_ctx.input.cache_manager; let mut metrics = ScannerMetrics::default(); let mut fetch_start = Instant::now(); #[cfg(debug_assertions)] diff --git a/src/mito2/src/read/unordered_scan.rs b/src/mito2/src/read/unordered_scan.rs index 707b7d4ba65c..7a2ce12e62ca 100644 --- a/src/mito2/src/read/unordered_scan.rs +++ b/src/mito2/src/read/unordered_scan.rs @@ -135,7 +135,7 @@ impl UnorderedScan { let stream = try_stream! { part_metrics.on_first_poll(); - let cache = stream_ctx.input.cache_manager.as_deref(); + let cache = &stream_ctx.input.cache_manager; // Scans each part. for part_range in part_ranges { let mut metrics = ScannerMetrics::default(); diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs index ae51a0d37c29..c94ae600735f 100644 --- a/src/mito2/src/sst/parquet.rs +++ b/src/mito2/src/sst/parquet.rs @@ -195,11 +195,11 @@ mod tests { .unwrap(); // Enable page cache. - let cache = Some(Arc::new( + let cache = Arc::new( CacheManager::builder() .page_cache_size(64 * 1024 * 1024) .build(), - )); + ); let builder = ParquetReaderBuilder::new(FILE_DIR.to_string(), handle.clone(), object_store) .cache(cache.clone()); for _ in 0..3 { @@ -219,15 +219,15 @@ mod tests { // Doesn't have compressed page cached. let page_key = PageKey::new_compressed(metadata.region_id, handle.file_id(), 0, 0); - assert!(cache.as_ref().unwrap().get_pages(&page_key).is_none()); + assert!(cache.get_pages(&page_key).is_none()); // Cache 4 row groups. 
for i in 0..4 { let page_key = PageKey::new_uncompressed(metadata.region_id, handle.file_id(), i, 0); - assert!(cache.as_ref().unwrap().get_pages(&page_key).is_some()); + assert!(cache.get_pages(&page_key).is_some()); } let page_key = PageKey::new_uncompressed(metadata.region_id, handle.file_id(), 5, 0); - assert!(cache.as_ref().unwrap().get_pages(&page_key).is_none()); + assert!(cache.get_pages(&page_key).is_none()); } #[tokio::test] diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs index b73026a7a6e3..cd219f47ccd6 100644 --- a/src/mito2/src/sst/parquet/reader.rs +++ b/src/mito2/src/sst/parquet/reader.rs @@ -82,7 +82,7 @@ pub struct ParquetReaderBuilder { /// can contain columns not in the parquet file. projection: Option<Vec<ColumnId>>, /// Manager that caches SST data. - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, /// Index appliers. inverted_index_applier: Option<InvertedIndexApplierRef>, fulltext_index_applier: Option<FulltextIndexApplierRef>, @@ -106,7 +106,7 @@ impl ParquetReaderBuilder { predicate: None, time_range: None, projection: None, - cache_manager: None, + cache_manager: CacheManagerRef::default(), inverted_index_applier: None, fulltext_index_applier: None, expected_metadata: None, @@ -138,7 +138,7 @@ impl ParquetReaderBuilder { /// Attaches the cache to the builder. #[must_use] - pub fn cache(mut self, cache: Option<CacheManagerRef>) -> ParquetReaderBuilder { + pub fn cache(mut self, cache: CacheManagerRef) -> ParquetReaderBuilder { self.cache_manager = cache; self } @@ -313,10 +313,12 @@ impl ParquetReaderBuilder { let region_id = self.file_handle.region_id(); let file_id = self.file_handle.file_id(); // Tries to get from global cache. - if let Some(manager) = &self.cache_manager { - if let Some(metadata) = manager.get_parquet_meta_data(region_id, file_id).await { - return Ok(metadata); - } + if let Some(metadata) = self + .cache_manager + .get_parquet_meta_data(region_id, file_id) + .await + { + return Ok(metadata); } // Cache miss, load metadata directly. @@ -324,13 +326,11 @@ impl ParquetReaderBuilder { let metadata = metadata_loader.load().await?; let metadata = Arc::new(metadata); // Cache the metadata. - if let Some(cache) = &self.cache_manager { - cache.put_parquet_meta_data( - self.file_handle.region_id(), - self.file_handle.file_id(), - metadata.clone(), - ); - } + self.cache_manager.put_parquet_meta_data( + self.file_handle.region_id(), + self.file_handle.file_id(), + metadata.clone(), + ); Ok(metadata) } @@ -846,7 +846,7 @@ pub(crate) struct RowGroupReaderBuilder { /// Field levels to read. field_levels: FieldLevels, /// Cache. - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, } impl RowGroupReaderBuilder { @@ -864,7 +864,7 @@ impl RowGroupReaderBuilder { &self.parquet_meta } - pub(crate) fn cache_manager(&self) -> &Option<CacheManagerRef> { + pub(crate) fn cache_manager(&self) -> &CacheManagerRef { &self.cache_manager } diff --git a/src/mito2/src/sst/parquet/row_group.rs b/src/mito2/src/sst/parquet/row_group.rs index 73382c06d9b3..dd572d8863f8 100644 --- a/src/mito2/src/sst/parquet/row_group.rs +++ b/src/mito2/src/sst/parquet/row_group.rs @@ -48,7 +48,7 @@ pub struct InMemoryRowGroup<'a> { region_id: RegionId, file_id: FileId, row_group_idx: usize, - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, /// Row group level cached pages for each column. /// /// These pages are uncompressed pages of a row group. 
@@ -69,7 +69,7 @@ impl<'a> InMemoryRowGroup<'a> { file_id: FileId, parquet_meta: &'a ParquetMetaData, row_group_idx: usize, - cache_manager: Option<CacheManagerRef>, + cache_manager: CacheManagerRef, file_path: &'a str, object_store: ObjectStore, ) -> Self { @@ -208,19 +208,18 @@ impl<'a> InMemoryRowGroup<'a> { }; let column = self.metadata.column(idx); - if let Some(cache) = &self.cache_manager { - if !cache_uncompressed_pages(column) { - // For columns that have multiple uncompressed pages, we only cache the compressed page - // to save memory. - let page_key = PageKey::new_compressed( - self.region_id, - self.file_id, - self.row_group_idx, - idx, - ); - cache - .put_pages(page_key, Arc::new(PageValue::new_compressed(data.clone()))); - } + + if !cache_uncompressed_pages(column) { + // For columns that have multiple uncompressed pages, we only cache the compressed page + // to save memory. + let page_key = PageKey::new_compressed( + self.region_id, + self.file_id, + self.row_group_idx, + idx, + ); + self.cache_manager + .put_pages(page_key, Arc::new(PageValue::new_compressed(data.clone()))); } *chunk = Some(Arc::new(ColumnChunkData::Dense { @@ -242,9 +241,6 @@ impl<'a> InMemoryRowGroup<'a> { .enumerate() .filter(|(idx, chunk)| chunk.is_none() && projection.leaf_included(*idx)) .for_each(|(idx, chunk)| { - let Some(cache) = &self.cache_manager else { - return; - }; let column = self.metadata.column(idx); if cache_uncompressed_pages(column) { // Fetches uncompressed pages for the row group. @@ -254,7 +250,7 @@ impl<'a> InMemoryRowGroup<'a> { self.row_group_idx, idx, ); - self.column_uncompressed_pages[idx] = cache.get_pages(&page_key); + self.column_uncompressed_pages[idx] = self.cache_manager.get_pages(&page_key); } else { // Fetches the compressed page from the cache. let page_key = PageKey::new_compressed( @@ -264,7 +260,7 @@ impl<'a> InMemoryRowGroup<'a> { idx, ); - *chunk = cache.get_pages(&page_key).map(|page_value| { + *chunk = self.cache_manager.get_pages(&page_key).map(|page_value| { Arc::new(ColumnChunkData::Dense { offset: column.byte_range().0 as usize, data: page_value.compressed.clone(), @@ -300,7 +296,7 @@ impl<'a> InMemoryRowGroup<'a> { key: IndexKey, ranges: &[Range<u64>], ) -> Option<Vec<Bytes>> { - if let Some(cache) = self.cache_manager.as_ref()?.write_cache() { + if let Some(cache) = self.cache_manager.write_cache() { return cache.file_cache().read_ranges(key, ranges).await; } None @@ -331,10 +327,6 @@ impl<'a> InMemoryRowGroup<'a> { } }; - let Some(cache) = &self.cache_manager else { - return Ok(Box::new(page_reader)); - }; - let column = self.metadata.column(i); if cache_uncompressed_pages(column) { // This column use row group level page cache. @@ -343,7 +335,7 @@ impl<'a> InMemoryRowGroup<'a> { let page_value = Arc::new(PageValue::new_row_group(pages)); let page_key = PageKey::new_uncompressed(self.region_id, self.file_id, self.row_group_idx, i); - cache.put_pages(page_key, page_value.clone()); + self.cache_manager.put_pages(page_key, page_value.clone()); return Ok(Box::new(RowGroupCachedReader::new(&page_value.row_group))); }
refactor
Avoid wrapping Option for CacheManagerRef (#4996)
50bea2f107bf4d999933234ccd715a0cb8edf8de
2024-04-09 08:58:21
Ruihang Xia
feat: treat all number types as field candidates (#3670)
false
diff --git a/src/servers/src/http/prometheus_resp.rs b/src/servers/src/http/prometheus_resp.rs index 94d57ed65b5e..0f9176cdf6ee 100644 --- a/src/servers/src/http/prometheus_resp.rs +++ b/src/servers/src/http/prometheus_resp.rs @@ -36,7 +36,7 @@ use super::header::{collect_plan_metrics, GREPTIME_DB_HEADER_METRICS}; use super::prometheus::{ PromData, PromQueryResult, PromSeriesMatrix, PromSeriesVector, PrometheusResponse, }; -use crate::error::{CollectRecordbatchSnafu, InternalSnafu, Result}; +use crate::error::{CollectRecordbatchSnafu, Result, UnexpectedResultSnafu}; #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, PartialEq)] pub struct PrometheusJsonResponse { @@ -183,7 +183,17 @@ impl PrometheusJsonResponse { timestamp_column_index = Some(i); } } - ConcreteDataType::Float64(_) => { + // Treat all value types as field + ConcreteDataType::Float32(_) + | ConcreteDataType::Float64(_) + | ConcreteDataType::Int8(_) + | ConcreteDataType::Int16(_) + | ConcreteDataType::Int32(_) + | ConcreteDataType::Int64(_) + | ConcreteDataType::UInt8(_) + | ConcreteDataType::UInt16(_) + | ConcreteDataType::UInt32(_) + | ConcreteDataType::UInt64(_) => { if first_field_column_index.is_none() { first_field_column_index = Some(i); } @@ -195,11 +205,11 @@ impl PrometheusJsonResponse { } } - let timestamp_column_index = timestamp_column_index.context(InternalSnafu { - err_msg: "no timestamp column found".to_string(), + let timestamp_column_index = timestamp_column_index.context(UnexpectedResultSnafu { + reason: "no timestamp column found".to_string(), })?; - let first_field_column_index = first_field_column_index.context(InternalSnafu { - err_msg: "no value column found".to_string(), + let first_field_column_index = first_field_column_index.context(UnexpectedResultSnafu { + reason: "no value column found".to_string(), })?; let metric_name = (METRIC_NAME.to_string(), metric_name); @@ -226,8 +236,11 @@ impl PrometheusJsonResponse { .as_any() .downcast_ref::<TimestampMillisecondVector>() .unwrap(); - let field_column = batch + let casted_field_column = batch .column(first_field_column_index) + .cast(&ConcreteDataType::float64_datatype()) + .unwrap(); + let field_column = casted_field_column .as_any() .downcast_ref::<Float64Vector>() .unwrap(); diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index ea8ec073ceed..20ef70a0ffff 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -436,6 +436,12 @@ pub async fn test_prom_http_api(store_type: StorageType) { .send() .await; assert_eq!(res.status(), StatusCode::OK); + let res = client + .post("/v1/prometheus/api/v1/query_range?query=count(count(up))&start=1&end=100&step=5") + .header("Content-Type", "application/x-www-form-urlencoded") + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); // labels let res = client
feat
treat all number types as field candidates (#3670)
b35221ccb63cfa67900b634b61a7e84d7a39fbf9
2024-12-09 12:52:47
Weny Xu
ci: set meta replicas to 1 (#5111)
false
diff --git a/.github/actions/setup-greptimedb-cluster/action.yml b/.github/actions/setup-greptimedb-cluster/action.yml index 088a46582507..7c385c43a9a9 100644 --- a/.github/actions/setup-greptimedb-cluster/action.yml +++ b/.github/actions/setup-greptimedb-cluster/action.yml @@ -8,7 +8,7 @@ inputs: default: 2 description: "Number of Datanode replicas" meta-replicas: - default: 3 + default: 1 description: "Number of Metasrv replicas" image-registry: default: "docker.io" @@ -58,7 +58,7 @@ runs: --set image.tag=${{ inputs.image-tag }} \ --set base.podTemplate.main.resources.requests.cpu=50m \ --set base.podTemplate.main.resources.requests.memory=256Mi \ - --set base.podTemplate.main.resources.limits.cpu=1000m \ + --set base.podTemplate.main.resources.limits.cpu=2000m \ --set base.podTemplate.main.resources.limits.memory=2Gi \ --set frontend.replicas=${{ inputs.frontend-replicas }} \ --set datanode.replicas=${{ inputs.datanode-replicas }} \
ci
set meta replicas to 1 (#5111)
88d46a38ae631ec4a9dce43449abd84735048fd4
2024-12-24 14:24:59
Ruihang Xia
chore: bump opendal to fork version to fix prometheus layer (#5223)
false
diff --git a/Cargo.lock b/Cargo.lock index 8387a80663ed..7049caad46e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7472,8 +7472,7 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opendal" version = "0.50.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb28bb6c64e116ceaf8dd4e87099d3cfea4a58e85e62b104fef74c91afba0f44" +source = "git+https://github.com/GreptimeTeam/opendal.git?rev=c82605177f2feec83e49dcaa537c505639d94024#c82605177f2feec83e49dcaa537c505639d94024" dependencies = [ "anyhow", "async-trait", diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml index b82be7376a72..275343fa5e45 100644 --- a/src/object-store/Cargo.toml +++ b/src/object-store/Cargo.toml @@ -17,7 +17,7 @@ futures.workspace = true lazy_static.workspace = true md5 = "0.7" moka = { workspace = true, features = ["future"] } -opendal = { version = "0.50", features = [ +opendal = { git = "https://github.com/GreptimeTeam/opendal.git", rev = "c82605177f2feec83e49dcaa537c505639d94024", features = [ "layers-tracing", "layers-prometheus", "services-azblob",
chore
bump opendal to fork version to fix prometheus layer (#5223)
b8c50d00aa81e8fb4a4d5e7dabc92401ce437642
2023-09-04 12:00:48
Zou Wei
feat: sqlness test for interval type (#2265)
false
diff --git a/tests/cases/standalone/common/select/dummy.result b/tests/cases/standalone/common/select/dummy.result index dade059db531..58845ddf5860 100644 --- a/tests/cases/standalone/common/select/dummy.result +++ b/tests/cases/standalone/common/select/dummy.result @@ -111,27 +111,3 @@ DROP TABLE test_unixtime; Affected Rows: 1 -select INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds 100 microseconds'; - -+---------------------------------------------------------+ -| IntervalMonthDayNano("1109194275255040973236744059552") | -+---------------------------------------------------------+ -| 0 years 14 mons 3 days 4 hours 5 mins 6.000100000 secs | -+---------------------------------------------------------+ - -select INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year'; - -+------------------------------------------------------------------------------------------------------------------+ -| IntervalMonthDayNano("1109194275255040972930743959552") + IntervalMonthDayNano("950737950171172051122527404032") | -+------------------------------------------------------------------------------------------------------------------+ -| 0 years 26 mons 3 days 4 hours 0 mins 0.000000000 secs | -+------------------------------------------------------------------------------------------------------------------+ - -select INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year'; - -+------------------------------------------------------------------------------------------------------------------+ -| IntervalMonthDayNano("1109194275255040972930743959552") - IntervalMonthDayNano("950737950171172051122527404032") | -+------------------------------------------------------------------------------------------------------------------+ -| 0 years 2 mons 3 days 4 hours 0 mins 0.000000000 secs | -+------------------------------------------------------------------------------------------------------------------+ - diff --git a/tests/cases/standalone/common/select/dummy.sql b/tests/cases/standalone/common/select/dummy.sql index 2b4c39642879..81ef9324a7ce 100644 --- a/tests/cases/standalone/common/select/dummy.sql +++ b/tests/cases/standalone/common/select/dummy.sql @@ -32,8 +32,3 @@ select TO_UNIXTIME(b) from test_unixtime; DROP TABLE test_unixtime; -select INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds 100 microseconds'; - -select INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year'; - -select INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year'; diff --git a/tests/cases/standalone/common/types/interval/interval.result b/tests/cases/standalone/common/types/interval/interval.result new file mode 100644 index 000000000000..14d06feac65c --- /dev/null +++ b/tests/cases/standalone/common/types/interval/interval.result @@ -0,0 +1,269 @@ +-- common test +SELECT INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds 100 microseconds'; + ++---------------------------------------------------------+ +| IntervalMonthDayNano("1109194275255040973236744059552") | ++---------------------------------------------------------+ +| 0 years 14 mons 3 days 4 hours 5 mins 6.000100000 secs | ++---------------------------------------------------------+ + +SELECT INTERVAL '1.5 year'; + ++---------------------------------------------------------+ +| IntervalMonthDayNano("1426106925256758076683791106048") | ++---------------------------------------------------------+ +| 0 years 18 mons 0 days 0 hours 0 mins 0.000000000 secs | ++---------------------------------------------------------+ + 
+SELECT INTERVAL '-2 months'; + ++---------------------------------------------------------+ +| IntervalMonthDayNano("-158456325028528675187087900672") | ++---------------------------------------------------------+ +| 0 years -2 mons 0 days 0 hours 0 mins 0.000000000 secs | ++---------------------------------------------------------+ + +SELECT INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year'; + ++------------------------------------------------------------------------------------------------------------------+ +| IntervalMonthDayNano("1109194275255040972930743959552") + IntervalMonthDayNano("950737950171172051122527404032") | ++------------------------------------------------------------------------------------------------------------------+ +| 0 years 26 mons 3 days 4 hours 0 mins 0.000000000 secs | ++------------------------------------------------------------------------------------------------------------------+ + +SELECT INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year'; + ++------------------------------------------------------------------------------------------------------------------+ +| IntervalMonthDayNano("1109194275255040972930743959552") - IntervalMonthDayNano("950737950171172051122527404032") | ++------------------------------------------------------------------------------------------------------------------+ +| 0 years 2 mons 3 days 4 hours 0 mins 0.000000000 secs | ++------------------------------------------------------------------------------------------------------------------+ + +SELECT INTERVAL '6 years' * 2; + +Error: 3000(PlanQuery), This feature is not implemented: Unsupported interval operator: Multiply + +SELECT INTERVAL '6 years' / 2; + +Error: 3000(PlanQuery), This feature is not implemented: Unsupported interval operator: Divide + +SELECT INTERVAL '6 years' = INTERVAL '72 months'; + +Error: 3000(PlanQuery), This feature is not implemented: Unsupported interval operator: Eq + +SELECT arrow_typeof(INTERVAL '1 month'); + ++---------------------------------------------------------------------+ +| arrow_typeof(IntervalMonthDayNano("79228162514264337593543950336")) | ++---------------------------------------------------------------------+ +| Interval(MonthDayNano) | ++---------------------------------------------------------------------+ + +-- INTERVAL + TIME CONSTANT +SELECT current_time() + INTERVAL '1 hour'; + +Error: 3000(PlanQuery), Error during planning: Cannot coerce arithmetic expression Time64(Nanosecond) + Interval(MonthDayNano) to valid types + +-- table with interval type test +CREATE TABLE IF NOT EXISTS intervals( + ts TIMESTAMP TIME INDEX, + interval_value INTERVAL, +); + +Affected Rows: 0 + +DESCRIBE TABLE intervals; + ++----------------+----------------------+-----+------+---------+---------------+ +| Column | Type | Key | Null | Default | Semantic Type | ++----------------+----------------------+-----+------+---------+---------------+ +| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP | +| interval_value | IntervalMonthDayNano | | YES | | FIELD | ++----------------+----------------------+-----+------+---------+---------------+ + +INSERT INTO intervals(ts, interval_value) +values +('2022-01-01 00:00:01', INTERVAL '1 year'), +('2022-01-01 00:00:02', INTERVAL '1 year'), +('2022-02-01 00:00:01', INTERVAL '2 year 2 months'), +('2022-03-01 00:00:01', INTERVAL '3 year 3 hours'), +('2022-04-01 00:00:01', INTERVAL '4 year 4 minutes'), +('2022-05-01 00:00:01', INTERVAL '5 year 5 seconds'), +('2022-06-01 00:00:01', INTERVAL '6 year 
6 milliseconds'), +('2022-07-01 00:00:01', INTERVAL '7 year 7 microseconds'), +('2022-08-01 00:00:01', INTERVAL '8 year 8 nanoseconds'), +('2022-09-01 00:00:01', INTERVAL '9 year 9 days'), +('2022-10-01 00:00:01', INTERVAL '10 year 10 hours 10 minutes 10 seconds 10 milliseconds 10 microseconds 10 nanoseconds'), +('2022-11-01 00:00:01', INTERVAL '11 year 11 days 11 hours 11 minutes 11 seconds 11 milliseconds 11 microseconds 11 nanoseconds'), +('2022-12-01 00:00:01', INTERVAL '12 year 12 days 12 hours 12 minutes 12 seconds 12 milliseconds 12 microseconds 12 nanoseconds'); + +Affected Rows: 13 + +SELECT * FROM intervals; + ++---------------------+-------------------------------------------------------------+ +| ts | interval_value | ++---------------------+-------------------------------------------------------------+ +| 2022-01-01T00:00:01 | 0 years 12 mons 0 days 0 hours 0 mins 0.000000000 secs | +| 2022-01-01T00:00:02 | 0 years 12 mons 0 days 0 hours 0 mins 0.000000000 secs | +| 2022-02-01T00:00:01 | 0 years 26 mons 0 days 0 hours 0 mins 0.000000000 secs | +| 2022-03-01T00:00:01 | 0 years 36 mons 0 days 3 hours 0 mins 0.000000000 secs | +| 2022-04-01T00:00:01 | 0 years 48 mons 0 days 0 hours 4 mins 0.000000000 secs | +| 2022-05-01T00:00:01 | 0 years 60 mons 0 days 0 hours 0 mins 5.000000000 secs | +| 2022-06-01T00:00:01 | 0 years 72 mons 0 days 0 hours 0 mins 0.006000000 secs | +| 2022-07-01T00:00:01 | 0 years 84 mons 0 days 0 hours 0 mins 0.000007000 secs | +| 2022-08-01T00:00:01 | 0 years 96 mons 0 days 0 hours 0 mins 0.000000008 secs | +| 2022-09-01T00:00:01 | 0 years 108 mons 9 days 0 hours 0 mins 0.000000000 secs | +| 2022-10-01T00:00:01 | 0 years 120 mons 0 days 10 hours 10 mins 10.010010010 secs | +| 2022-11-01T00:00:01 | 0 years 132 mons 11 days 11 hours 11 mins 11.011011011 secs | +| 2022-12-01T00:00:01 | 0 years 144 mons 12 days 12 hours 12 mins 12.012012012 secs | ++---------------------+-------------------------------------------------------------+ + +SELECT DISTINCT interval_value FROM intervals ORDER BY interval_value; + ++-------------------------------------------------------------+ +| interval_value | ++-------------------------------------------------------------+ +| 0 years 12 mons 0 days 0 hours 0 mins 0.000000000 secs | +| 0 years 26 mons 0 days 0 hours 0 mins 0.000000000 secs | +| 0 years 36 mons 0 days 3 hours 0 mins 0.000000000 secs | +| 0 years 48 mons 0 days 0 hours 4 mins 0.000000000 secs | +| 0 years 60 mons 0 days 0 hours 0 mins 5.000000000 secs | +| 0 years 72 mons 0 days 0 hours 0 mins 0.006000000 secs | +| 0 years 84 mons 0 days 0 hours 0 mins 0.000007000 secs | +| 0 years 96 mons 0 days 0 hours 0 mins 0.000000008 secs | +| 0 years 108 mons 9 days 0 hours 0 mins 0.000000000 secs | +| 0 years 120 mons 0 days 10 hours 10 mins 10.010010010 secs | +| 0 years 132 mons 11 days 11 hours 11 mins 11.011011011 secs | +| 0 years 144 mons 12 days 12 hours 12 mins 12.012012012 secs | ++-------------------------------------------------------------+ + +-- ts + interval +SELECT ts + interval_value as new_value from intervals; + ++-------------------------+ +| new_value | ++-------------------------+ +| 2023-01-01T00:00:01 | +| 2023-01-01T00:00:02 | +| 2024-04-01T00:00:01 | +| 2025-03-01T03:00:01 | +| 2026-04-01T00:04:01 | +| 2027-05-01T00:00:06 | +| 2028-06-01T00:00:01.006 | +| 2029-07-01T00:00:01 | +| 2030-08-01T00:00:01 | +| 2031-09-10T00:00:01 | +| 2032-10-01T10:10:11.010 | +| 2033-11-12T11:11:12.011 | +| 2034-12-13T12:12:13.012 | ++-------------------------+ + +-- ts - 
interval +SELECT ts - interval_value as new_value from intervals; + ++-------------------------+ +| new_value | ++-------------------------+ +| 2021-01-01T00:00:01 | +| 2021-01-01T00:00:02 | +| 2019-12-01T00:00:01 | +| 2019-02-28T21:00:01 | +| 2018-03-31T23:56:01 | +| 2017-04-30T23:59:56 | +| 2016-06-01T00:00:00.994 | +| 2015-07-01T00:00:00.999 | +| 2014-08-01T00:00:00.999 | +| 2013-08-23T00:00:01 | +| 2012-09-30T13:49:50.989 | +| 2011-10-20T12:48:49.988 | +| 2010-11-18T11:47:48.987 | ++-------------------------+ + +-- DATE + INTERVAL +SELECT DATE '2000-10-30' + interval_value from intervals; + ++-----------------------------------------------+ +| Utf8("2000-10-30") + intervals.interval_value | ++-----------------------------------------------+ +| 2001-10-30 | +| 2001-10-30 | +| 2002-12-30 | +| 2003-10-30 | +| 2004-10-30 | +| 2005-10-30 | +| 2006-10-30 | +| 2007-10-30 | +| 2008-10-30 | +| 2009-11-08 | +| 2010-10-30 | +| 2011-11-10 | +| 2012-11-11 | ++-----------------------------------------------+ + +-- DATE - INTERVAL +-- Run failed in distributed mode, but passed in standalone mode: +-- SELECT DATE '2000-10-30' - interval_value from intervals; +-- INTERVAL + TIMESTAMP CONSTANT +SELECT TIMESTAMP '1992-09-20 11:30:00.123456' + interval_value as new_value from intervals; + ++-------------------------------+ +| new_value | ++-------------------------------+ +| 1993-09-20T11:30:00.123456 | +| 1993-09-20T11:30:00.123456 | +| 1994-11-20T11:30:00.123456 | +| 1995-09-20T14:30:00.123456 | +| 1996-09-20T11:34:00.123456 | +| 1997-09-20T11:30:05.123456 | +| 1998-09-20T11:30:00.129456 | +| 1999-09-20T11:30:00.123463 | +| 2000-09-20T11:30:00.123456008 | +| 2001-09-29T11:30:00.123456 | +| 2002-09-20T21:40:10.133466010 | +| 2003-10-01T22:41:11.134467011 | +| 2004-10-02T23:42:12.135468012 | ++-------------------------------+ + +-- TIMESTAMP CONSTANT - INTERVAL +SELECT TIMESTAMP '1992-09-20 11:30:00.123456' - interval_value as new_value from intervals; + ++-------------------------------+ +| new_value | ++-------------------------------+ +| 1991-09-20T11:30:00.123456 | +| 1991-09-20T11:30:00.123456 | +| 1990-07-20T11:30:00.123456 | +| 1989-09-20T08:30:00.123456 | +| 1988-09-20T11:26:00.123456 | +| 1987-09-20T11:29:55.123456 | +| 1986-09-20T11:30:00.117456 | +| 1985-09-20T11:30:00.123449 | +| 1984-09-20T11:30:00.123455992 | +| 1983-09-11T11:30:00.123456 | +| 1982-09-20T01:19:50.113445990 | +| 1981-09-09T00:18:49.112444989 | +| 1980-09-07T23:17:48.111443988 | ++-------------------------------+ + +-- Interval type does not support aggregation functions. +SELECT MIN(interval_value) from intervals; + +Error: 1003(Internal), Internal error: Min/Max accumulator not implemented for type Interval(MonthDayNano). This was likely caused by a bug in DataFusion's code and we would welcome that you file an bug report in our issue tracker + +SELECT MAX(interval_value) from intervals; + +Error: 1003(Internal), Internal error: Min/Max accumulator not implemented for type Interval(MonthDayNano). This was likely caused by a bug in DataFusion's code and we would welcome that you file an bug report in our issue tracker + +SELECT SUM(interval_value) from intervals; + +Error: 3000(PlanQuery), Error during planning: The function Sum does not support inputs of type Interval(MonthDayNano). + +SELECT AVG(interval_value) from intervals; + +Error: 3000(PlanQuery), Error during planning: The function Avg does not support inputs of type Interval(MonthDayNano). 
+ +DROP TABLE intervals; + +Affected Rows: 1 + diff --git a/tests/cases/standalone/common/types/interval/interval.sql b/tests/cases/standalone/common/types/interval/interval.sql new file mode 100644 index 000000000000..e1bb6dcc34e9 --- /dev/null +++ b/tests/cases/standalone/common/types/interval/interval.sql @@ -0,0 +1,80 @@ +-- common test +SELECT INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds 100 microseconds'; + +SELECT INTERVAL '1.5 year'; + +SELECT INTERVAL '-2 months'; + +SELECT INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year'; + +SELECT INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year'; + +SELECT INTERVAL '6 years' * 2; + +SELECT INTERVAL '6 years' / 2; + +SELECT INTERVAL '6 years' = INTERVAL '72 months'; + +SELECT arrow_typeof(INTERVAL '1 month'); + +-- INTERVAL + TIME CONSTANT +SELECT current_time() + INTERVAL '1 hour'; + +-- table with interval type test +CREATE TABLE IF NOT EXISTS intervals( + ts TIMESTAMP TIME INDEX, + interval_value INTERVAL, +); + +DESCRIBE TABLE intervals; + +INSERT INTO intervals(ts, interval_value) +values +('2022-01-01 00:00:01', INTERVAL '1 year'), +('2022-01-01 00:00:02', INTERVAL '1 year'), +('2022-02-01 00:00:01', INTERVAL '2 year 2 months'), +('2022-03-01 00:00:01', INTERVAL '3 year 3 hours'), +('2022-04-01 00:00:01', INTERVAL '4 year 4 minutes'), +('2022-05-01 00:00:01', INTERVAL '5 year 5 seconds'), +('2022-06-01 00:00:01', INTERVAL '6 year 6 milliseconds'), +('2022-07-01 00:00:01', INTERVAL '7 year 7 microseconds'), +('2022-08-01 00:00:01', INTERVAL '8 year 8 nanoseconds'), +('2022-09-01 00:00:01', INTERVAL '9 year 9 days'), +('2022-10-01 00:00:01', INTERVAL '10 year 10 hours 10 minutes 10 seconds 10 milliseconds 10 microseconds 10 nanoseconds'), +('2022-11-01 00:00:01', INTERVAL '11 year 11 days 11 hours 11 minutes 11 seconds 11 milliseconds 11 microseconds 11 nanoseconds'), +('2022-12-01 00:00:01', INTERVAL '12 year 12 days 12 hours 12 minutes 12 seconds 12 milliseconds 12 microseconds 12 nanoseconds'); + +SELECT * FROM intervals; + +SELECT DISTINCT interval_value FROM intervals ORDER BY interval_value; + +-- ts + interval +SELECT ts + interval_value as new_value from intervals; + +-- ts - interval +SELECT ts - interval_value as new_value from intervals; + +-- DATE + INTERVAL +SELECT DATE '2000-10-30' + interval_value from intervals; + +-- DATE - INTERVAL +-- Run failed in distributed mode, but passed in standalone mode: +-- SELECT DATE '2000-10-30' - interval_value from intervals; + +-- INTERVAL + TIMESTAMP CONSTANT +SELECT TIMESTAMP '1992-09-20 11:30:00.123456' + interval_value as new_value from intervals; + +-- TIMESTAMP CONSTANT - INTERVAL +SELECT TIMESTAMP '1992-09-20 11:30:00.123456' - interval_value as new_value from intervals; + + +-- Interval type does not support aggregation functions. +SELECT MIN(interval_value) from intervals; + +SELECT MAX(interval_value) from intervals; + +SELECT SUM(interval_value) from intervals; + +SELECT AVG(interval_value) from intervals; + +DROP TABLE intervals;
feat
sqlness test for interval type (#2265)
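The sqlness cases above exercise the INTERVAL type end to end; the Rust `Interval` representation behind them is introduced in the "feat: impl interval type (#1952)" commit recorded later in this file. A minimal sketch of that API is shown below. It is illustrative only and not part of any recorded diff: it assumes it is compiled inside the GreptimeDB workspace where the internal `common_time` crate is available, and the expected strings mirror the unit tests in that commit.

```rust
// Illustrative sketch, assuming the workspace-internal `common_time` crate from
// the "feat: impl interval type (#1952)" diff below; not part of the recorded commits.
use common_time::Interval;

fn main() {
    // INTERVAL '1 year 2 months 3 days' is stored as (months, days, nanoseconds).
    let iv = Interval::from_month_day_nano(14, 3, 0);
    assert_eq!(iv.to_iso8601_string(), "P1Y2M3DT0H0M0S");

    // Postgres-style rendering, taken from that commit's own unit test.
    let iv = Interval::from_month_day_nano(23, 100, 23_210_200_000_000);
    assert_eq!(iv.to_postgres_string(), "1 year 11 mons 100 days 06:26:50.200000");

    // Multiplication justifies 30-day carries into whole months, so the product
    // comes back in canonical (months, days, nanoseconds) form.
    let doubled = Interval::from_month_day_nano(1, 31, 1)
        .checked_mul_int(2)
        .unwrap();
    assert_eq!(doubled, Interval::from_month_day_nano(4, 2, 2));
}
```

Comparison, ordering, and hashing all go through the nanosecond-normalized value of the interval, which is why the DISTINCT and ORDER BY queries over interval_value in the results above behave as a plain total order over the stored values.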
c370b4b40df05bd0023f60ba1246f4c9412b6c1a
2025-01-13 13:05:12
ZonaHe
feat: update dashboard to v0.7.7 (#5350)
false
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION index 378c127dd70e..b977a66d97cc 100644 --- a/src/servers/dashboard/VERSION +++ b/src/servers/dashboard/VERSION @@ -1 +1 @@ -v0.7.6 +v0.7.7
feat
update dashboard to v0.7.7 (#5350)
7727508485bd9b6224105145017951822a1a12df
2023-07-31 09:24:39
Zou Wei
feat: impl interval type (#1952)
false
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs index 143c44ceb0da..b708b05449fb 100644 --- a/src/api/src/helper.rs +++ b/src/api/src/helper.rs @@ -111,7 +111,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper { TimeType::Microsecond(_) => ColumnDataType::TimeMicrosecond, TimeType::Nanosecond(_) => ColumnDataType::TimeNanosecond, }, - ConcreteDataType::Null(_) + ConcreteDataType::Interval(_) + | ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => { return error::IntoColumnDataTypeSnafu { from: datatype }.fail() @@ -255,7 +256,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) { TimeUnit::Microsecond => values.time_microsecond_values.push(val.value()), TimeUnit::Nanosecond => values.time_nanosecond_values.push(val.value()), }, - Value::List(_) => unreachable!(), + Value::Interval(_) | Value::List(_) => unreachable!(), }); column.null_mask = null_mask.into_vec(); } diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs index b220e2e6f4d2..13a51ddc893e 100644 --- a/src/common/grpc-expr/src/insert.rs +++ b/src/common/grpc-expr/src/insert.rs @@ -424,7 +424,10 @@ fn values_to_vector(data_type: &ConcreteDataType, values: Values) -> VectorRef { )), }, - ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => { + ConcreteDataType::Interval(_) + | ConcreteDataType::Null(_) + | ConcreteDataType::List(_) + | ConcreteDataType::Dictionary(_) => { unreachable!() } } @@ -553,7 +556,10 @@ fn convert_values(data_type: &ConcreteDataType, values: Values) -> Vec<Value> { .map(|v| Value::Time(Time::new_nanosecond(v))) .collect(), - ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => { + ConcreteDataType::Interval(_) + | ConcreteDataType::Null(_) + | ConcreteDataType::List(_) + | ConcreteDataType::Dictionary(_) => { unreachable!() } } diff --git a/src/common/grpc/src/select.rs b/src/common/grpc/src/select.rs index 21eed06c0c9c..aed198d7faeb 100644 --- a/src/common/grpc/src/select.rs +++ b/src/common/grpc/src/select.rs @@ -68,7 +68,7 @@ macro_rules! convert_arrow_array_to_grpc_vals { return Ok(vals); }, )+ - ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type), + ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_)| ConcreteDataType::Interval(_) => unreachable!("Should not send {:?} in gRPC", $data_type), } }}; } diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs index d9511a3f7491..6e4f4d78ae9c 100644 --- a/src/common/time/src/error.rs +++ b/src/common/time/src/error.rs @@ -32,6 +32,9 @@ pub enum Error { #[snafu(display("Failed to parse a string into Timestamp, raw string: {}", raw))] ParseTimestamp { raw: String, location: Location }, + #[snafu(display("Failed to parse a string into Interval, raw string: {}", raw))] + ParseInterval { raw: String, location: Location }, + #[snafu(display("Current timestamp overflow, source: {}", source))] TimestampOverflow { source: TryFromIntError, @@ -71,6 +74,7 @@ impl ErrorExt for Error { Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => { StatusCode::InvalidArguments } + Error::ParseInterval { .. } => StatusCode::InvalidArguments, } } @@ -88,6 +92,7 @@ impl ErrorExt for Error { | Error::ParseOffsetStr { .. } | Error::ParseTimeZoneName { .. } => None, Error::InvalidDateStr { location, .. 
} => Some(*location), + Error::ParseInterval { location, .. } => Some(*location), } } } diff --git a/src/common/time/src/interval.rs b/src/common/time/src/interval.rs new file mode 100644 index 000000000000..7d4f6bc5f7b4 --- /dev/null +++ b/src/common/time/src/interval.rs @@ -0,0 +1,743 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::cmp::Ordering; +use std::default::Default; +use std::fmt::{self, Display, Formatter, Write}; +use std::hash::{Hash, Hasher}; + +use arrow::datatypes::IntervalUnit as ArrowIntervalUnit; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive( + Debug, Default, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, +)] +pub enum IntervalUnit { + /// Indicates the number of elapsed whole months, stored as 4-byte integers. + YearMonth, + /// Indicates the number of elapsed days and milliseconds, + /// stored as 2 contiguous 32-bit integers (days, milliseconds) (8-bytes in total). + DayTime, + /// A triple of the number of elapsed months, days, and nanoseconds. + /// The values are stored contiguously in 16 byte blocks. Months and + /// days are encoded as 32 bit integers and nanoseconds is encoded as a + /// 64 bit integer. All integers are signed. Each field is independent + /// (e.g. there is no constraint that nanoseconds have the same sign + /// as days or that the quantity of nanoseconds represents less + /// than a day's worth of time). + #[default] + MonthDayNano, +} + +impl From<&ArrowIntervalUnit> for IntervalUnit { + fn from(unit: &ArrowIntervalUnit) -> Self { + match unit { + ArrowIntervalUnit::YearMonth => IntervalUnit::YearMonth, + ArrowIntervalUnit::DayTime => IntervalUnit::DayTime, + ArrowIntervalUnit::MonthDayNano => IntervalUnit::MonthDayNano, + } + } +} + +impl From<ArrowIntervalUnit> for IntervalUnit { + fn from(unit: ArrowIntervalUnit) -> Self { + (&unit).into() + } +} + +/// Interval Type represents a period of time. +/// It is composed of months, days and nanoseconds. +/// 3 kinds of interval are supported: year-month, day-time and +/// month-day-nano, which will be stored in the following format. +/// Interval data format: +/// | months | days | nsecs | +/// | 4bytes | 4bytes | 8bytes | +#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)] +pub struct Interval { + months: i32, + days: i32, + nsecs: i64, + unit: IntervalUnit, +} + +// Nanosecond convert to other time unit +pub const NANOS_PER_SEC: i64 = 1_000_000_000; +pub const NANOS_PER_MILLI: i64 = 1_000_000; +pub const NANOS_PER_MICRO: i64 = 1_000; +pub const NANOS_PER_HOUR: i64 = 60 * 60 * NANOS_PER_SEC; +pub const NANOS_PER_DAY: i64 = 24 * NANOS_PER_HOUR; +pub const NANOS_PER_MONTH: i64 = 30 * NANOS_PER_DAY; + +pub const DAYS_PER_MONTH: i64 = 30; + +impl Interval { + /// Creates a new interval from months, days and nanoseconds. + /// Precision is nanosecond. 
+ pub fn from_month_day_nano(months: i32, days: i32, nsecs: i64) -> Self { + Interval { + months, + days, + nsecs, + unit: IntervalUnit::MonthDayNano, + } + } + + /// Creates a new interval from months. + pub fn from_year_month(months: i32) -> Self { + Interval { + months, + days: 0, + nsecs: 0, + unit: IntervalUnit::YearMonth, + } + } + + /// Creates a new interval from days and milliseconds. + pub fn from_day_time(days: i32, millis: i32) -> Self { + Interval { + months: 0, + days, + nsecs: (millis as i64) * NANOS_PER_MILLI, + unit: IntervalUnit::DayTime, + } + } + + /// Converts the interval to nanoseconds. + pub fn to_nanosecond(&self) -> i128 { + let days = (self.days as i64) + DAYS_PER_MONTH * (self.months as i64); + (self.nsecs as i128) + (NANOS_PER_DAY as i128) * (days as i128) + } + + /// Smallest interval value. + pub const MIN: Self = Self { + months: i32::MIN, + days: i32::MIN, + nsecs: i64::MIN, + unit: IntervalUnit::MonthDayNano, + }; + + /// Largest interval value. + pub const MAX: Self = Self { + months: i32::MAX, + days: i32::MAX, + nsecs: i64::MAX, + unit: IntervalUnit::MonthDayNano, + }; + + /// Returns the justified interval. + /// allows you to adjust the interval of 30-day as one month and the interval of 24-hour as one day + pub fn justified_interval(&self) -> Self { + let mut result = *self; + let extra_months_d = self.days as i64 / DAYS_PER_MONTH; + let extra_months_nsecs = self.nsecs / NANOS_PER_MONTH; + result.days -= (extra_months_d * DAYS_PER_MONTH) as i32; + result.nsecs -= extra_months_nsecs * NANOS_PER_MONTH; + + let extra_days = self.nsecs / NANOS_PER_DAY; + result.nsecs -= extra_days * NANOS_PER_DAY; + + result.months += extra_months_d as i32 + extra_months_nsecs as i32; + result.days += extra_days as i32; + + result + } + + /// Convert Interval to nanoseconds, + /// to check whether Interval is positive + pub fn is_positive(&self) -> bool { + self.to_nanosecond() > 0 + } + + /// is_zero + pub fn is_zero(&self) -> bool { + self.months == 0 && self.days == 0 && self.nsecs == 0 + } + + /// get unit + pub fn unit(&self) -> IntervalUnit { + self.unit + } + + /// Multiple Interval by an integer with overflow check. + /// Returns justified Interval, or `None` if overflow occurred. 
+ pub fn checked_mul_int<I>(&self, rhs: I) -> Option<Self> + where + I: TryInto<i32>, + { + let rhs = rhs.try_into().ok()?; + let months = self.months.checked_mul(rhs)?; + let days = self.days.checked_mul(rhs)?; + let nsecs = self.nsecs.checked_mul(rhs as i64)?; + + Some( + Self { + months, + days, + nsecs, + unit: self.unit, + } + .justified_interval(), + ) + } + + /// Convert Interval to ISO 8601 string + pub fn to_iso8601_string(self) -> String { + IntervalFormat::from(self).to_iso8601_string() + } + + /// Convert Interval to postgres verbose string + pub fn to_postgres_string(self) -> String { + IntervalFormat::from(self).to_postgres_string() + } + + /// Convert Interval to sql_standard string + pub fn to_sql_standard_string(self) -> String { + IntervalFormat::from(self).to_sql_standard_string() + } + + /// Interval Type and i128[MonthDayNano] Convert + /// v consists of months(i32) | days(i32) | nsecs(i64) + pub fn from_i128(v: i128) -> Self { + Interval { + nsecs: v as i64, + days: (v >> 64) as i32, + months: (v >> 96) as i32, + unit: IntervalUnit::MonthDayNano, + } + } + + /// `Interval` Type and i64[DayTime] Convert + /// v consists of days(i32) | milliseconds(i32) + pub fn from_i64(v: i64) -> Self { + Interval { + nsecs: ((v as i32) as i64) * NANOS_PER_MILLI, + days: (v >> 32) as i32, + months: 0, + unit: IntervalUnit::DayTime, + } + } + + /// `Interval` Type and i32[YearMonth] Convert + /// v consists of months(i32) + pub fn from_i32(v: i32) -> Self { + Interval { + nsecs: 0, + days: 0, + months: v, + unit: IntervalUnit::YearMonth, + } + } + + pub fn to_i128(&self) -> i128 { + let mut result = 0; + result |= self.months as i128; + result <<= 32; + result |= self.days as i128; + result <<= 64; + result |= self.nsecs as i128; + result + } + + pub fn to_i64(&self) -> i64 { + let mut result = 0; + result |= self.days as i64; + result <<= 32; + result |= self.nsecs / NANOS_PER_MILLI; + result + } + + pub fn to_i32(&self) -> i32 { + self.months + } +} + +impl From<i128> for Interval { + fn from(v: i128) -> Self { + Self::from_i128(v) + } +} + +impl From<Interval> for i128 { + fn from(v: Interval) -> Self { + v.to_i128() + } +} + +impl From<Interval> for serde_json::Value { + fn from(v: Interval) -> Self { + Value::String(v.to_string()) + } +} + +impl Display for Interval { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut s = String::new(); + if self.months != 0 { + write!(s, "{} months ", self.months)?; + } + if self.days != 0 { + write!(s, "{} days ", self.days)?; + } + if self.nsecs != 0 { + write!(s, "{} nsecs", self.nsecs)?; + } + write!(f, "{}", s.trim()) + } +} + +/// https://www.postgresql.org/docs/current/datatype-datetime.html#DATATYPE-INTERVAL-OUTPUT +/// support postgres format, iso8601 format and sql standard format +#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)] +pub struct IntervalFormat { + pub years: i32, + pub months: i32, + pub days: i32, + pub hours: i64, + pub minutes: i64, + pub seconds: i64, + pub microseconds: i64, +} + +impl From<Interval> for IntervalFormat { + fn from(val: Interval) -> IntervalFormat { + let months = val.months; + let days = val.days; + let microseconds = val.nsecs / NANOS_PER_MICRO; + let years = (months - (months % 12)) / 12; + let months = months - years * 12; + let hours = (microseconds - (microseconds % 3_600_000_000)) / 3_600_000_000; + let microseconds = microseconds - hours * 3_600_000_000; + let minutes = (microseconds - (microseconds % 60_000_000)) / 60_000_000; + let microseconds = microseconds - 
minutes * 60_000_000; + let seconds = (microseconds - (microseconds % 1_000_000)) / 1_000_000; + let microseconds = microseconds - seconds * 1_000_000; + IntervalFormat { + years, + months, + days, + hours, + minutes, + seconds, + microseconds, + } + } +} + +impl IntervalFormat { + /// All the field in the interval is 0 + pub fn is_zero(&self) -> bool { + self.years == 0 + && self.months == 0 + && self.days == 0 + && self.hours == 0 + && self.minutes == 0 + && self.seconds == 0 + && self.microseconds == 0 + } + + /// Determine if year or month exist + pub fn has_year_month(&self) -> bool { + self.years != 0 || self.months != 0 + } + + /// Determine if day exists + pub fn has_day(&self) -> bool { + self.days != 0 + } + + /// Determine time part(includes hours, minutes, seconds, microseconds) is positive + pub fn has_time_part_positive(&self) -> bool { + self.hours > 0 || self.minutes > 0 || self.seconds > 0 || self.microseconds > 0 + } + + // time part means hours, minutes, seconds, microseconds + pub fn has_time_part(&self) -> bool { + self.hours != 0 || self.minutes != 0 || self.seconds != 0 || self.microseconds != 0 + } + + /// Convert IntervalFormat to iso8601 format string + /// ISO pattern - PnYnMnDTnHnMnS + /// for example: P1Y2M3DT4H5M6.789S + pub fn to_iso8601_string(&self) -> String { + if self.is_zero() { + return "PT0S".to_string(); + } + let fract_str = match self.microseconds { + 0 => "".to_string(), + _ => format!(".{:06}", self.microseconds) + .trim_end_matches('0') + .to_string(), + }; + format!( + "P{}Y{}M{}DT{}H{}M{}{}S", + self.years, self.months, self.days, self.hours, self.minutes, self.seconds, fract_str + ) + } + + /// Convert IntervalFormat to sql standard format string + /// SQL standard pattern - [years - months] [days] [hours:minutes:seconds[.fractional seconds]] + /// for example: 1-2 3:4:5.678 + pub fn to_sql_standard_string(self) -> String { + if self.is_zero() { + "0".to_string() + } else if !self.has_time_part() && !self.has_day() { + get_year_month(self.months, self.years, true) + } else if !self.has_time_part() && !self.has_year_month() { + format!("{} 0:00:00", self.days) + } else if !self.has_year_month() && !self.has_day() { + get_time_part( + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.has_time_part_positive(), + true, + ) + } else { + let year_month = get_year_month(self.months, self.years, false); + let time_interval = get_time_part( + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.has_time_part_positive(), + false, + ); + format!("{} {:+} {}", year_month, self.days, time_interval) + } + } + + /// Convert IntervalFormat to postgres format string + /// postgres pattern - [years - months] [days] [hours[:minutes[:seconds[.fractional seconds]]]] + /// for example: -1 year -2 mons +3 days -04:05:06 + pub fn to_postgres_string(&self) -> String { + if self.is_zero() { + return "00:00:00".to_string(); + } + let mut result = "".to_string(); + if self.has_year_month() { + if self.years != 0 { + result.push_str(&format!("{} year ", self.years)); + } + if self.months != 0 { + result.push_str(&format!("{} mons ", self.months)); + } + } + if self.has_day() { + result.push_str(&format!("{} days ", self.days)); + } + result.push_str(&self.get_postgres_time_part()); + result.trim().to_string() + } + + /// get postgres time part(include hours, minutes, seconds, microseconds) + fn get_postgres_time_part(&self) -> String { + let mut time_part = "".to_string(); + if self.has_time_part() { + let sign = if 
!self.has_time_part_positive() { + "-" + } else { + "" + }; + let hours = Self::padding_i64(self.hours); + time_part.push_str(&format!( + "{}{}:{}:{}", + sign, + hours, + Self::padding_i64(self.minutes), + Self::padding_i64(self.seconds), + )); + if self.microseconds != 0 { + time_part.push_str(&format!(".{:06}", self.microseconds.unsigned_abs())) + } + } + time_part + } + + /// padding i64 to string with 2 digits + fn padding_i64(val: i64) -> String { + let num = if val < 0 { + val.unsigned_abs() + } else { + val as u64 + }; + format!("{:02}", num) + } +} + +/// get year month string +fn get_year_month(mons: i32, years: i32, is_only_year_month: bool) -> String { + let months = mons.unsigned_abs(); + if years == 0 || is_only_year_month { + format!("{}-{}", years, months) + } else { + format!("{:+}-{}", years, months) + } +} + +/// get time part string +fn get_time_part( + hours: i64, + mins: i64, + secs: i64, + micros: i64, + is_time_part_positive: bool, + is_only_time: bool, +) -> String { + let mut interval = "".to_string(); + if is_time_part_positive && is_only_time { + interval.push_str(&format!("{}:{:02}:{:02}", hours, mins, secs)); + } else { + let minutes = mins.unsigned_abs(); + let seconds = secs.unsigned_abs(); + interval.push_str(&format!("{:+}:{:02}:{:02}", hours, minutes, seconds)); + } + if micros != 0 { + let microseconds = format!(".{:06}", micros.unsigned_abs()); + interval.push_str(&microseconds); + } + interval +} + +/// IntervalCompare is used to compare two intervals +/// It makes interval into nanoseconds style. +#[derive(PartialEq, Eq, Hash, PartialOrd, Ord)] +struct IntervalCompare(i128); + +impl From<Interval> for IntervalCompare { + fn from(interval: Interval) -> Self { + Self(interval.to_nanosecond()) + } +} + +impl Ord for Interval { + fn cmp(&self, other: &Self) -> Ordering { + IntervalCompare::from(*self).cmp(&IntervalCompare::from(*other)) + } +} + +impl PartialOrd for Interval { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Eq for Interval {} + +impl PartialEq for Interval { + fn eq(&self, other: &Self) -> bool { + self.cmp(other).is_eq() + } +} + +impl Hash for Interval { + fn hash<H: Hasher>(&self, state: &mut H) { + IntervalCompare::from(*self).hash(state) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use super::*; + + #[test] + fn test_from_year_month() { + let interval = Interval::from_year_month(1); + assert_eq!(interval.months, 1); + } + + #[test] + fn test_from_date_time() { + let interval = Interval::from_day_time(1, 2); + assert_eq!(interval.days, 1); + assert_eq!(interval.nsecs, 2_000_000); + } + + #[test] + fn test_interval_is_positive() { + let interval = Interval::from_year_month(1); + assert!(interval.is_positive()); + let interval = Interval::from_year_month(-1); + assert!(!interval.is_positive()); + + let interval = Interval::from_day_time(1, i32::MIN); + assert!(!interval.is_positive()); + } + + #[test] + fn test_to_nanosecond() { + let interval = Interval::from_year_month(1); + assert_eq!(interval.to_nanosecond(), 2592000000000000); + let interval = Interval::from_day_time(1, 2); + assert_eq!(interval.to_nanosecond(), 86400002000000); + + let max_interval = Interval::from_month_day_nano(i32::MAX, i32::MAX, i64::MAX); + assert_eq!(max_interval.to_nanosecond(), 5751829423496836854775807); + + let min_interval = Interval::from_month_day_nano(i32::MIN, i32::MIN, i64::MIN); + assert_eq!(min_interval.to_nanosecond(), -5751829426175236854775808); + } + + #[test] + fn 
test_interval_is_zero() { + let interval = Interval::from_month_day_nano(1, 1, 1); + assert!(!interval.is_zero()); + let interval = Interval::from_month_day_nano(0, 0, 0); + assert!(interval.is_zero()); + } + + #[test] + fn test_interval_i128_convert() { + let interval = Interval::from_month_day_nano(1, 1, 1); + let interval_i128 = interval.to_i128(); + assert_eq!(interval_i128, 79228162532711081667253501953); + } + + #[test] + fn test_convert_interval_format() { + let interval = Interval::from_month_day_nano(14, 160, 1000000); + let interval_format = IntervalFormat::from(interval); + assert_eq!(interval_format.years, 1); + assert_eq!(interval_format.months, 2); + assert_eq!(interval_format.days, 160); + assert_eq!(interval_format.hours, 0); + assert_eq!(interval_format.minutes, 0); + assert_eq!(interval_format.seconds, 0); + assert_eq!(interval_format.microseconds, 1000); + } + + #[test] + fn test_interval_hash() { + let interval = Interval::from_month_day_nano(1, 31, 1); + let interval2 = Interval::from_month_day_nano(2, 1, 1); + let mut map = HashMap::new(); + map.insert(interval, 1); + assert_eq!(map.get(&interval2), Some(&1)); + } + + #[test] + fn test_interval_mul_int() { + let interval = Interval::from_month_day_nano(1, 1, 1); + let interval2 = interval.checked_mul_int(2).unwrap(); + assert_eq!(interval2.months, 2); + assert_eq!(interval2.days, 2); + assert_eq!(interval2.nsecs, 2); + + // test justified interval + let interval = Interval::from_month_day_nano(1, 31, 1); + let interval2 = interval.checked_mul_int(2).unwrap(); + assert_eq!(interval2.months, 4); + assert_eq!(interval2.days, 2); + assert_eq!(interval2.nsecs, 2); + + // test overflow situation + let interval = Interval::from_month_day_nano(i32::MAX, 1, 1); + let interval2 = interval.checked_mul_int(2); + assert!(interval2.is_none()); + } + + #[test] + fn test_display() { + let interval = Interval::from_month_day_nano(1, 1, 1); + assert_eq!(interval.to_string(), "1 months 1 days 1 nsecs"); + + let interval = Interval::from_month_day_nano(14, 31, 10000000000); + assert_eq!(interval.to_string(), "14 months 31 days 10000000000 nsecs"); + } + + #[test] + fn test_interval_justified() { + let interval = Interval::from_month_day_nano(1, 131, 1).justified_interval(); + let interval2 = Interval::from_month_day_nano(5, 11, 1); + assert_eq!(interval, interval2); + + let interval = Interval::from_month_day_nano(1, 1, NANOS_PER_MONTH + 2 * NANOS_PER_DAY) + .justified_interval(); + let interval2 = Interval::from_month_day_nano(2, 3, 0); + assert_eq!(interval, interval2); + } + + #[test] + fn test_serde_json() { + let interval = Interval::from_month_day_nano(1, 1, 1); + let json = serde_json::to_string(&interval).unwrap(); + assert_eq!( + json, + "{\"months\":1,\"days\":1,\"nsecs\":1,\"unit\":\"MonthDayNano\"}" + ); + let interval2: Interval = serde_json::from_str(&json).unwrap(); + assert_eq!(interval, interval2); + } + + #[test] + fn test_to_iso8601_string() { + // Test interval zero + let interval = Interval::from_month_day_nano(0, 0, 0); + assert_eq!(interval.to_iso8601_string(), "PT0S"); + + let interval = Interval::from_month_day_nano(1, 1, 1); + assert_eq!(interval.to_iso8601_string(), "P0Y1M1DT0H0M0S"); + + let interval = Interval::from_month_day_nano(14, 31, 10000000000); + assert_eq!(interval.to_iso8601_string(), "P1Y2M31DT0H0M10S"); + + let interval = Interval::from_month_day_nano(14, 31, 23210200000000); + assert_eq!(interval.to_iso8601_string(), "P1Y2M31DT6H26M50.2S"); + } + + #[test] + fn test_to_postgres_string() { + // 
Test interval zero + let interval = Interval::from_month_day_nano(0, 0, 0); + assert_eq!(interval.to_postgres_string(), "00:00:00"); + + let interval = Interval::from_month_day_nano(23, 100, 23210200000000); + assert_eq!( + interval.to_postgres_string(), + "1 year 11 mons 100 days 06:26:50.200000" + ); + } + + #[test] + fn test_to_sql_standard_string() { + // Test zero interval + let interval = Interval::from_month_day_nano(0, 0, 0); + assert_eq!(interval.to_sql_standard_string(), "0"); + + let interval = Interval::from_month_day_nano(23, 100, 23210200000000); + assert_eq!( + interval.to_sql_standard_string(), + "+1-11 +100 +6:26:50.200000" + ); + + // Test interval without year, month, day + let interval = Interval::from_month_day_nano(0, 0, 23210200000000); + assert_eq!(interval.to_sql_standard_string(), "6:26:50.200000"); + } + + #[test] + fn test_from_arrow_interval_unit() { + let unit = ArrowIntervalUnit::YearMonth; + assert_eq!(IntervalUnit::from(unit), IntervalUnit::YearMonth); + + let unit = ArrowIntervalUnit::DayTime; + assert_eq!(IntervalUnit::from(unit), IntervalUnit::DayTime); + + let unit = ArrowIntervalUnit::MonthDayNano; + assert_eq!(IntervalUnit::from(unit), IntervalUnit::MonthDayNano); + } +} diff --git a/src/common/time/src/lib.rs b/src/common/time/src/lib.rs index 1328d08a23d3..9cd61cf6fb8b 100644 --- a/src/common/time/src/lib.rs +++ b/src/common/time/src/lib.rs @@ -15,6 +15,7 @@ pub mod date; pub mod datetime; pub mod error; +pub mod interval; pub mod range; pub mod time; pub mod timestamp; @@ -24,6 +25,7 @@ pub mod util; pub use date::Date; pub use datetime::DateTime; +pub use interval::Interval; pub use range::RangeMillis; pub use timestamp::Timestamp; pub use timestamp_millis::TimestampMillis; diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs index 74c5450a9499..4d87c7641280 100644 --- a/src/datatypes/src/data_type.rs +++ b/src/datatypes/src/data_type.rs @@ -15,7 +15,10 @@ use std::fmt; use std::sync::Arc; -use arrow::datatypes::{DataType as ArrowDataType, TimeUnit as ArrowTimeUnit}; +use arrow::datatypes::{ + DataType as ArrowDataType, IntervalUnit as ArrowIntervalUnit, TimeUnit as ArrowTimeUnit, +}; +use common_time::interval::IntervalUnit; use common_time::timestamp::TimeUnit; use paste::paste; use serde::{Deserialize, Serialize}; @@ -24,7 +27,8 @@ use crate::error::{self, Error, Result}; use crate::type_id::LogicalTypeId; use crate::types::{ BinaryType, BooleanType, DateTimeType, DateType, DictionaryType, Float32Type, Float64Type, - Int16Type, Int32Type, Int64Type, Int8Type, ListType, NullType, StringType, TimeMillisecondType, + Int16Type, Int32Type, Int64Type, Int8Type, IntervalDayTimeType, IntervalMonthDayNanoType, + IntervalType, IntervalYearMonthType, ListType, NullType, StringType, TimeMillisecondType, TimeType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType, UInt16Type, UInt32Type, UInt64Type, UInt8Type, }; @@ -59,6 +63,9 @@ pub enum ConcreteDataType { Timestamp(TimestampType), Time(TimeType), + // Interval types: + Interval(IntervalType), + // Compound types: List(ListType), Dictionary(DictionaryType), @@ -87,6 +94,7 @@ impl fmt::Display for ConcreteDataType { ConcreteDataType::Time(_) => write!(f, "Time"), ConcreteDataType::List(_) => write!(f, "List"), ConcreteDataType::Dictionary(_) => write!(f, "Dictionary"), + ConcreteDataType::Interval(_) => write!(f, "Interval"), } } } @@ -113,6 +121,7 @@ impl ConcreteDataType { | ConcreteDataType::DateTime(_) | 
ConcreteDataType::Timestamp(_) | ConcreteDataType::Time(_) + | ConcreteDataType::Interval(_) ) } @@ -127,6 +136,7 @@ impl ConcreteDataType { | ConcreteDataType::DateTime(_) | ConcreteDataType::Timestamp(_) | ConcreteDataType::Time(_) + | ConcreteDataType::Interval(_) ) } @@ -222,6 +232,7 @@ impl TryFrom<&ArrowDataType> for ConcreteDataType { ArrowDataType::Date32 => Self::date_datatype(), ArrowDataType::Date64 => Self::datetime_datatype(), ArrowDataType::Timestamp(u, _) => ConcreteDataType::from_arrow_time_unit(u), + ArrowDataType::Interval(u) => ConcreteDataType::from_arrow_interval_unit(u), ArrowDataType::Binary | ArrowDataType::LargeBinary => Self::binary_datatype(), ArrowDataType::Utf8 | ArrowDataType::LargeUtf8 => Self::string_datatype(), ArrowDataType::List(field) => Self::List(ListType::new( @@ -311,6 +322,20 @@ impl ConcreteDataType { Self::time_datatype(TimeUnit::Nanosecond) } + pub fn interval_month_day_nano_datatype() -> Self { + ConcreteDataType::Interval(IntervalType::MonthDayNano( + IntervalMonthDayNanoType::default(), + )) + } + + pub fn interval_year_month_datatype() -> Self { + ConcreteDataType::Interval(IntervalType::YearMonth(IntervalYearMonthType::default())) + } + + pub fn interval_day_time_datatype() -> Self { + ConcreteDataType::Interval(IntervalType::DayTime(IntervalDayTimeType::default())) + } + pub fn timestamp_datatype(unit: TimeUnit) -> Self { match unit { TimeUnit::Second => Self::timestamp_second_datatype(), @@ -330,6 +355,22 @@ impl ConcreteDataType { } } + pub fn interval_datatype(unit: IntervalUnit) -> Self { + match unit { + IntervalUnit::YearMonth => Self::interval_year_month_datatype(), + IntervalUnit::DayTime => Self::interval_day_time_datatype(), + IntervalUnit::MonthDayNano => Self::interval_month_day_nano_datatype(), + } + } + + pub fn from_arrow_interval_unit(u: &ArrowIntervalUnit) -> Self { + match u { + ArrowIntervalUnit::YearMonth => Self::interval_year_month_datatype(), + ArrowIntervalUnit::DayTime => Self::interval_day_time_datatype(), + ArrowIntervalUnit::MonthDayNano => Self::interval_month_day_nano_datatype(), + } + } + pub fn list_datatype(item_type: ConcreteDataType) -> ConcreteDataType { ConcreteDataType::List(ListType::new(item_type)) } @@ -545,6 +586,10 @@ mod tests { assert!(ConcreteDataType::time_millisecond_datatype().is_stringifiable()); assert!(ConcreteDataType::time_microsecond_datatype().is_stringifiable()); assert!(ConcreteDataType::time_nanosecond_datatype().is_stringifiable()); + + assert!(ConcreteDataType::interval_year_month_datatype().is_stringifiable()); + assert!(ConcreteDataType::interval_day_time_datatype().is_stringifiable()); + assert!(ConcreteDataType::interval_month_day_nano_datatype().is_stringifiable()); } #[test] @@ -563,6 +608,9 @@ mod tests { assert!(ConcreteDataType::time_millisecond_datatype().is_signed()); assert!(ConcreteDataType::time_microsecond_datatype().is_signed()); assert!(ConcreteDataType::time_nanosecond_datatype().is_signed()); + assert!(ConcreteDataType::interval_year_month_datatype().is_signed()); + assert!(ConcreteDataType::interval_day_time_datatype().is_signed()); + assert!(ConcreteDataType::interval_month_day_nano_datatype().is_signed()); assert!(!ConcreteDataType::uint8_datatype().is_signed()); assert!(!ConcreteDataType::uint16_datatype().is_signed()); @@ -589,6 +637,9 @@ mod tests { assert!(!ConcreteDataType::time_millisecond_datatype().is_unsigned()); assert!(!ConcreteDataType::time_microsecond_datatype().is_unsigned()); 
assert!(!ConcreteDataType::time_nanosecond_datatype().is_unsigned()); + assert!(!ConcreteDataType::interval_year_month_datatype().is_unsigned()); + assert!(!ConcreteDataType::interval_day_time_datatype().is_unsigned()); + assert!(!ConcreteDataType::interval_month_day_nano_datatype().is_unsigned()); assert!(ConcreteDataType::uint8_datatype().is_unsigned()); assert!(ConcreteDataType::uint16_datatype().is_unsigned()); @@ -691,5 +742,12 @@ mod tests { "Date" ); assert_eq!(ConcreteDataType::time_second_datatype().to_string(), "Time"); + assert_eq!( + ConcreteDataType::from_arrow_type(&ArrowDataType::Interval( + arrow_schema::IntervalUnit::MonthDayNano, + )) + .to_string(), + "Interval" + ) } } diff --git a/src/datatypes/src/interval.rs b/src/datatypes/src/interval.rs new file mode 100644 index 000000000000..1a1e2b1dbd2e --- /dev/null +++ b/src/datatypes/src/interval.rs @@ -0,0 +1,138 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use common_time::interval::Interval; +use paste::paste; +use serde::{Deserialize, Serialize}; + +use crate::prelude::{Scalar, Value, ValueRef}; +use crate::scalars::ScalarRef; +use crate::types::{ + IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, WrapperType, +}; +use crate::vectors::{IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector}; + +macro_rules! define_interval_with_unit { + ($unit: ident, $native_ty: ty) => { + paste! 
{ + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] + pub struct [<Interval $unit>](pub Interval); + + impl [<Interval $unit>] { + pub fn new(val: $native_ty) -> Self { + Self(Interval:: [<from_ $native_ty>](val)) + } + } + + impl Default for [<Interval $unit>] { + fn default() -> Self { + Self::new(0) + } + } + + impl From<[<Interval $unit>]> for Value { + fn from(t: [<Interval $unit>]) -> Value { + Value::Interval(t.0) + } + } + + impl From<[<Interval $unit>]> for serde_json::Value { + fn from(t: [<Interval $unit>]) -> Self { + t.0.into() + } + } + + impl From<[<Interval $unit>]> for ValueRef<'static> { + fn from(t: [<Interval $unit>]) -> Self { + ValueRef::Interval(t.0) + } + } + + impl Scalar for [<Interval $unit>] { + type VectorType = [<Interval $unit Vector>]; + type RefType<'a> = [<Interval $unit>]; + + fn as_scalar_ref(&self) -> Self::RefType<'_> { + *self + } + + fn upcast_gat<'short, 'long: 'short>( + long: Self::RefType<'long>, + ) -> Self::RefType<'short> { + long + } + } + + impl<'a> ScalarRef<'a> for [<Interval $unit>] { + type ScalarType = [<Interval $unit>]; + + fn to_owned_scalar(&self) -> Self::ScalarType { + *self + } + } + + impl WrapperType for [<Interval $unit>] { + type LogicalType = [<Interval $unit Type>]; + type Native = $native_ty; + + fn from_native(value: Self::Native) -> Self { + Self::new(value) + } + + fn into_native(self) -> Self::Native { + self.0.[<to_ $native_ty>]() + } + } + + impl From<$native_ty> for [<Interval $unit>] { + fn from(val: $native_ty) -> Self { + [<Interval $unit>]::from_native(val as $native_ty) + } + } + + impl From<[<Interval $unit>]> for $native_ty { + fn from(val: [<Interval $unit>]) -> Self { + val.0.[<to_ $native_ty>]() + } + } + } + }; +} + +define_interval_with_unit!(YearMonth, i32); +define_interval_with_unit!(DayTime, i64); +define_interval_with_unit!(MonthDayNano, i128); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_interval_scalar() { + let interval = IntervalYearMonth::new(1000); + assert_eq!(interval, interval.as_scalar_ref()); + assert_eq!(interval, interval.to_owned_scalar()); + assert_eq!(1000, interval.into_native()); + + let interval = IntervalDayTime::new(1000); + assert_eq!(interval, interval.as_scalar_ref()); + assert_eq!(interval, interval.to_owned_scalar()); + assert_eq!(1000, interval.into_native()); + + let interval = IntervalMonthDayNano::new(1000); + assert_eq!(interval, interval.as_scalar_ref()); + assert_eq!(interval, interval.to_owned_scalar()); + assert_eq!(1000, interval.into_native()); + } +} diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs index 2ed804deeec8..3d8a7835c4b8 100644 --- a/src/datatypes/src/lib.rs +++ b/src/datatypes/src/lib.rs @@ -17,6 +17,7 @@ pub mod arrow_array; pub mod data_type; pub mod error; +pub mod interval; pub mod macros; pub mod prelude; pub mod scalars; diff --git a/src/datatypes/src/type_id.rs b/src/datatypes/src/type_id.rs index 602d4cba8b8a..8a4a02589da2 100644 --- a/src/datatypes/src/type_id.rs +++ b/src/datatypes/src/type_id.rs @@ -51,6 +51,12 @@ pub enum LogicalTypeId { TimeMillisecond, TimeMicrosecond, TimeNanosecond, + /// A 32-bit interval representing the elapsed time in months. + IntervalYearMonth, + /// A 64-bit interval representing the elapsed time in days and milliseconds. + IntervalDayTime, + /// A 128-bit interval representing the elapsed time in months, days and nanoseconds. 
+ IntervalMonthDayNano, List, Dictionary, @@ -102,6 +108,11 @@ impl LogicalTypeId { LogicalTypeId::TimeMillisecond => ConcreteDataType::time_millisecond_datatype(), LogicalTypeId::TimeMicrosecond => ConcreteDataType::time_microsecond_datatype(), LogicalTypeId::TimeNanosecond => ConcreteDataType::time_nanosecond_datatype(), + LogicalTypeId::IntervalYearMonth => ConcreteDataType::interval_year_month_datatype(), + LogicalTypeId::IntervalDayTime => ConcreteDataType::interval_day_time_datatype(), + LogicalTypeId::IntervalMonthDayNano => { + ConcreteDataType::interval_month_day_nano_datatype() + } } } } diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs index 41870c35f789..d01822a3b9e9 100644 --- a/src/datatypes/src/types.rs +++ b/src/datatypes/src/types.rs @@ -17,6 +17,7 @@ mod boolean_type; mod date_type; mod datetime_type; mod dictionary_type; +mod interval_type; mod list_type; mod null_type; mod primitive_type; @@ -29,6 +30,9 @@ pub use boolean_type::BooleanType; pub use date_type::DateType; pub use datetime_type::DateTimeType; pub use dictionary_type::DictionaryType; +pub use interval_type::{ + IntervalDayTimeType, IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, +}; pub use list_type::ListType; pub use null_type::NullType; pub use primitive_type::{ diff --git a/src/datatypes/src/types/interval_type.rs b/src/datatypes/src/types/interval_type.rs new file mode 100644 index 000000000000..d9f105673b95 --- /dev/null +++ b/src/datatypes/src/types/interval_type.rs @@ -0,0 +1,176 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use arrow::datatypes::{ + DataType as ArrowDataType, IntervalDayTimeType as ArrowIntervalDayTimeType, + IntervalMonthDayNanoType as ArrowIntervalMonthDayNanoType, IntervalUnit as ArrowIntervalUnit, + IntervalYearMonthType as ArrowIntervalYearMonthType, +}; +use common_time::interval::IntervalUnit; +use common_time::Interval; +use enum_dispatch::enum_dispatch; +use paste::paste; +use serde::{Deserialize, Serialize}; +use snafu::OptionExt; + +use crate::data_type::ConcreteDataType; +use crate::error; +use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth}; +use crate::prelude::{ + DataType, LogicalTypeId, MutableVector, ScalarVectorBuilder, Value, ValueRef, Vector, +}; +use crate::types::LogicalPrimitiveType; +use crate::vectors::{ + IntervalDayTimeVector, IntervalDayTimeVectorBuilder, IntervalMonthDayNanoVector, + IntervalMonthDayNanoVectorBuilder, IntervalYearMonthVector, IntervalYearMonthVectorBuilder, + PrimitiveVector, +}; + +/// The "calendar" interval is a type of time interval that does not +/// have a precise duration without taking into account a specific +/// base timestamp. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[enum_dispatch(DataType)] +pub enum IntervalType { + YearMonth(IntervalYearMonthType), + DayTime(IntervalDayTimeType), + MonthDayNano(IntervalMonthDayNanoType), +} + +impl IntervalType { + /// Returns the unit of the interval. + pub fn unit(&self) -> IntervalUnit { + match self { + IntervalType::YearMonth(_) => IntervalUnit::YearMonth, + IntervalType::DayTime(_) => IntervalUnit::DayTime, + IntervalType::MonthDayNano(_) => IntervalUnit::MonthDayNano, + } + } +} + +macro_rules! impl_data_type_for_interval { + ($unit: ident, $type: ty) => { + paste! { + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] + pub struct [<Interval $unit Type>]; + + impl DataType for [<Interval $unit Type>] { + fn name(&self) -> &str { + stringify!([<Interval $unit>]) + } + + fn logical_type_id(&self) -> LogicalTypeId { + LogicalTypeId::[<Interval $unit>] + } + + fn default_value(&self) -> Value { + Value::Interval(Interval::from_i128(0)) + } + + fn as_arrow_type(&self) -> ArrowDataType { + ArrowDataType::Interval(ArrowIntervalUnit::$unit) + } + + fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> { + Box::new([<Interval $unit Vector Builder>]::with_capacity(capacity)) + } + + fn is_timestamp_compatible(&self) -> bool { + false + } + } + + impl LogicalPrimitiveType for [<Interval $unit Type>] { + type ArrowPrimitive = [<Arrow Interval $unit Type>]; + type Native = $type; + type Wrapper = [<Interval $unit>]; + type LargestType = Self; + + fn build_data_type() -> ConcreteDataType { + ConcreteDataType::Interval(IntervalType::$unit( + [<Interval $unit Type>]::default(), + )) + } + + fn type_name() -> &'static str { + stringify!([<Interval $unit Type>]) + } + + fn cast_vector(vector: &dyn Vector) -> crate::Result<&PrimitiveVector<Self>> { + vector + .as_any() + .downcast_ref::<[<Interval $unit Vector>]>() + .with_context(|| error::CastTypeSnafu { + msg: format!( + "Failed to cast {} to {}", + vector.vector_type_name(), stringify!([<Interval $unit Vector>]) + ), + }) + } + + fn cast_value_ref(value: ValueRef) -> crate::Result<Option<Self::Wrapper>> { + match value { + ValueRef::Null => Ok(None), + ValueRef::Interval(t) => Ok(Some([<Interval $unit>](t))), + other => error::CastTypeSnafu { + msg: format!("Failed to cast value {:?} to {}", other, stringify!([<Interval $unit>])), + } + .fail(), + } + } + } + } + } +} + +impl_data_type_for_interval!(YearMonth, i32); +impl_data_type_for_interval!(DayTime, i64); +impl_data_type_for_interval!(MonthDayNano, i128); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_interval_type_unit() { + assert_eq!( + IntervalUnit::DayTime, + IntervalType::DayTime(IntervalDayTimeType).unit() + ); + assert_eq!( + IntervalUnit::MonthDayNano, + IntervalType::MonthDayNano(IntervalMonthDayNanoType).unit() + ); + assert_eq!( + IntervalUnit::YearMonth, + IntervalType::YearMonth(IntervalYearMonthType).unit() + ); + } + + #[test] + fn test_interval_as_arrow_type() { + assert_eq!( + ArrowDataType::Interval(ArrowIntervalUnit::DayTime), + IntervalType::DayTime(IntervalDayTimeType).as_arrow_type() + ); + assert_eq!( + ArrowDataType::Interval(ArrowIntervalUnit::MonthDayNano), + IntervalType::MonthDayNano(IntervalMonthDayNanoType).as_arrow_type() + ); + assert_eq!( + ArrowDataType::Interval(ArrowIntervalUnit::YearMonth), + IntervalType::YearMonth(IntervalYearMonthType).as_arrow_type() + ); + } +} diff --git a/src/datatypes/src/types/primitive_type.rs 
b/src/datatypes/src/types/primitive_type.rs index 4a9df85b3099..d8526d8a1321 100644 --- a/src/datatypes/src/types/primitive_type.rs +++ b/src/datatypes/src/types/primitive_type.rs @@ -46,6 +46,7 @@ impl_native_type!(i8); impl_native_type!(i16); impl_native_type!(i32); impl_native_type!(i64); +impl_native_type!(i128); impl_native_type!(f32); impl_native_type!(f64); diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs index 483f4ec9de69..097cb4d10380 100644 --- a/src/datatypes/src/value.rs +++ b/src/datatypes/src/value.rs @@ -22,8 +22,10 @@ use common_base::bytes::{Bytes, StringBytes}; use common_telemetry::logging; use common_time::date::Date; use common_time::datetime::DateTime; +use common_time::interval::IntervalUnit; use common_time::time::Time; use common_time::timestamp::{TimeUnit, Timestamp}; +use common_time::Interval; use datafusion_common::ScalarValue; pub use ordered_float::OrderedFloat; use serde::{Deserialize, Serialize}; @@ -33,7 +35,7 @@ use crate::error; use crate::error::Result; use crate::prelude::*; use crate::type_id::LogicalTypeId; -use crate::types::ListType; +use crate::types::{IntervalType, ListType}; use crate::vectors::ListVector; pub type OrderedF32 = OrderedFloat<f32>; @@ -68,6 +70,7 @@ pub enum Value { DateTime(DateTime), Timestamp(Timestamp), Time(Time), + Interval(Interval), List(ListValue), } @@ -100,6 +103,7 @@ impl Display for Value { Value::DateTime(v) => write!(f, "{v}"), Value::Timestamp(v) => write!(f, "{}", v.to_iso8601_string()), Value::Time(t) => write!(f, "{}", t.to_iso8601_string()), + Value::Interval(v) => write!(f, "{}", v.to_iso8601_string()), Value::List(v) => { let default = Box::<Vec<Value>>::default(); let items = v.items().as_ref().unwrap_or(&default); @@ -139,6 +143,7 @@ impl Value { Value::DateTime(_) => ConcreteDataType::datetime_datatype(), Value::Time(t) => ConcreteDataType::time_datatype(*t.unit()), Value::Timestamp(v) => ConcreteDataType::timestamp_datatype(v.unit()), + Value::Interval(v) => ConcreteDataType::interval_datatype(v.unit()), Value::List(list) => ConcreteDataType::list_datatype(list.datatype().clone()), } } @@ -182,6 +187,7 @@ impl Value { Value::List(v) => ValueRef::List(ListValueRef::Ref { val: v }), Value::Timestamp(v) => ValueRef::Timestamp(*v), Value::Time(v) => ValueRef::Time(*v), + Value::Interval(v) => ValueRef::Interval(*v), } } @@ -235,6 +241,11 @@ impl Value { TimeUnit::Microsecond => LogicalTypeId::TimeMicrosecond, TimeUnit::Nanosecond => LogicalTypeId::TimeNanosecond, }, + Value::Interval(v) => match v.unit() { + IntervalUnit::YearMonth => LogicalTypeId::IntervalYearMonth, + IntervalUnit::DayTime => LogicalTypeId::IntervalDayTime, + IntervalUnit::MonthDayNano => LogicalTypeId::IntervalMonthDayNano, + }, } } @@ -276,6 +287,11 @@ impl Value { } Value::Timestamp(t) => timestamp_to_scalar_value(t.unit(), Some(t.value())), Value::Time(t) => time_to_scalar_value(*t.unit(), Some(t.value()))?, + Value::Interval(v) => match v.unit() { + IntervalUnit::YearMonth => ScalarValue::IntervalYearMonth(Some(v.to_i32())), + IntervalUnit::DayTime => ScalarValue::IntervalDayTime(Some(v.to_i64())), + IntervalUnit::MonthDayNano => ScalarValue::IntervalMonthDayNano(Some(v.to_i128())), + }, }; Ok(scalar_value) @@ -301,6 +317,11 @@ pub fn to_null_scalar_value(output_type: &ConcreteDataType) -> Result<ScalarValu ConcreteDataType::Date(_) => ScalarValue::Date32(None), ConcreteDataType::DateTime(_) => ScalarValue::Date64(None), ConcreteDataType::Timestamp(t) => timestamp_to_scalar_value(t.unit(), None), + 
ConcreteDataType::Interval(v) => match v { + IntervalType::YearMonth(_) => ScalarValue::IntervalYearMonth(None), + IntervalType::DayTime(_) => ScalarValue::IntervalDayTime(None), + IntervalType::MonthDayNano(_) => ScalarValue::IntervalMonthDayNano(None), + }, ConcreteDataType::List(_) => { ScalarValue::List(None, Arc::new(new_item_field(output_type.as_arrow_type()))) } @@ -361,6 +382,16 @@ pub fn scalar_value_to_timestamp(scalar: &ScalarValue) -> Option<Timestamp> { } } +/// Convert [ScalarValue] to [Interval]. +pub fn scalar_value_to_interval(scalar: &ScalarValue) -> Option<Interval> { + match scalar { + ScalarValue::IntervalYearMonth(v) => v.map(Interval::from_i32), + ScalarValue::IntervalDayTime(v) => v.map(Interval::from_i64), + ScalarValue::IntervalMonthDayNano(v) => v.map(Interval::from_i128), + _ => None, + } +} + macro_rules! impl_ord_for_value_like { ($Type: ident, $left: ident, $right: ident) => { if $left.is_null() && !$right.is_null() { @@ -387,6 +418,7 @@ macro_rules! impl_ord_for_value_like { ($Type::DateTime(v1), $Type::DateTime(v2)) => v1.cmp(v2), ($Type::Timestamp(v1), $Type::Timestamp(v2)) => v1.cmp(v2), ($Type::Time(v1), $Type::Time(v2)) => v1.cmp(v2), + ($Type::Interval(v1), $Type::Interval(v2)) => v1.cmp(v2), ($Type::List(v1), $Type::List(v2)) => v1.cmp(v2), _ => panic!( "Cannot compare different values {:?} and {:?}", @@ -444,6 +476,7 @@ impl_value_from!(Binary, Bytes); impl_value_from!(Date, Date); impl_value_from!(DateTime, DateTime); impl_value_from!(Timestamp, Timestamp); +impl_value_from!(Interval, Interval); impl From<String> for Value { fn from(string: String) -> Value { @@ -493,6 +526,7 @@ impl TryFrom<Value> for serde_json::Value { Value::List(v) => serde_json::to_value(v)?, Value::Timestamp(v) => serde_json::to_value(v.value())?, Value::Time(v) => serde_json::to_value(v.value())?, + Value::Interval(v) => serde_json::to_value(v.to_i128())?, }; Ok(json_value) @@ -643,10 +677,16 @@ impl TryFrom<ScalarValue> for Value { .map(|x| Value::Time(Time::new(x, TimeUnit::Nanosecond))) .unwrap_or(Value::Null), + ScalarValue::IntervalYearMonth(t) => t + .map(|x| Value::Interval(Interval::from_i32(x))) + .unwrap_or(Value::Null), + ScalarValue::IntervalDayTime(t) => t + .map(|x| Value::Interval(Interval::from_i64(x))) + .unwrap_or(Value::Null), + ScalarValue::IntervalMonthDayNano(t) => t + .map(|x| Value::Interval(Interval::from_i128(x))) + .unwrap_or(Value::Null), ScalarValue::Decimal128(_, _, _) - | ScalarValue::IntervalYearMonth(_) - | ScalarValue::IntervalDayTime(_) - | ScalarValue::IntervalMonthDayNano(_) | ScalarValue::Struct(_, _) | ScalarValue::Dictionary(_, _) => { return error::UnsupportedArrowTypeSnafu { @@ -686,6 +726,7 @@ pub enum ValueRef<'a> { DateTime(DateTime), Timestamp(Timestamp), Time(Time), + Interval(Interval), // Compound types: List(ListValueRef<'a>), @@ -749,6 +790,11 @@ impl<'a> ValueRef<'a> { impl_as_for_value_ref!(self, Time) } + /// Cast itself to [Interval]. + pub fn as_interval(&self) -> Result<Option<Interval>> { + impl_as_for_value_ref!(self, Interval) + } + /// Cast itself to [ListValueRef]. 
pub fn as_list(&self) -> Result<Option<ListValueRef>> { impl_as_for_value_ref!(self, List) @@ -801,6 +847,7 @@ impl_value_ref_from!(Date, Date); impl_value_ref_from!(DateTime, DateTime); impl_value_ref_from!(Timestamp, Timestamp); impl_value_ref_from!(Time, Time); +impl_value_ref_from!(Interval, Interval); impl<'a> From<&'a str> for ValueRef<'a> { fn from(string: &'a str) -> ValueRef<'a> { @@ -1063,6 +1110,18 @@ mod tests { .try_into() .unwrap() ); + assert_eq!( + Value::Null, + ScalarValue::IntervalMonthDayNano(None).try_into().unwrap() + ); + assert_eq!( + Value::Interval(Interval::from_month_day_nano(1, 1, 1)), + ScalarValue::IntervalMonthDayNano(Some( + Interval::from_month_day_nano(1, 1, 1).to_i128() + )) + .try_into() + .unwrap() + ); assert_eq!( Value::Time(Time::new(1, TimeUnit::Second)), @@ -1248,6 +1307,10 @@ mod tests { &ConcreteDataType::time_nanosecond_datatype(), &Value::Time(Time::new_nanosecond(1)), ); + check_type_and_value( + &ConcreteDataType::interval_month_day_nano_datatype(), + &Value::Interval(Interval::from_month_day_nano(1, 2, 3)), + ); } #[test] @@ -1405,6 +1468,7 @@ mod tests { check_as_value_ref!(Float64, OrderedF64::from(16.0)); check_as_value_ref!(Timestamp, Timestamp::new_millisecond(1)); check_as_value_ref!(Time, Time::new_millisecond(1)); + check_as_value_ref!(Interval, Interval::from_month_day_nano(1, 2, 3)); assert_eq!( ValueRef::String("hello"), diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs index 5c873a070a35..0e4c40fafdf0 100644 --- a/src/datatypes/src/vectors.rs +++ b/src/datatypes/src/vectors.rs @@ -32,6 +32,7 @@ mod date; mod datetime; mod eq; mod helper; +mod interval; mod list; mod null; pub(crate) mod operations; @@ -47,6 +48,10 @@ pub use constant::ConstantVector; pub use date::{DateVector, DateVectorBuilder}; pub use datetime::{DateTimeVector, DateTimeVectorBuilder}; pub use helper::Helper; +pub use interval::{ + IntervalDayTimeVector, IntervalDayTimeVectorBuilder, IntervalMonthDayNanoVector, + IntervalMonthDayNanoVectorBuilder, IntervalYearMonthVector, IntervalYearMonthVectorBuilder, +}; pub use list::{ListIter, ListVector, ListVectorBuilder}; pub use null::{NullVector, NullVectorBuilder}; pub use primitive::{ diff --git a/src/datatypes/src/vectors/eq.rs b/src/datatypes/src/vectors/eq.rs index 1f9c2527ded2..4adb0df34df5 100644 --- a/src/datatypes/src/vectors/eq.rs +++ b/src/datatypes/src/vectors/eq.rs @@ -14,14 +14,17 @@ use std::sync::Arc; +use common_time::interval::IntervalUnit; + use crate::data_type::DataType; use crate::types::{TimeType, TimestampType}; use crate::vectors::constant::ConstantVector; use crate::vectors::{ - BinaryVector, BooleanVector, DateTimeVector, DateVector, ListVector, PrimitiveVector, - StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, - TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector, - TimestampNanosecondVector, TimestampSecondVector, Vector, + BinaryVector, BooleanVector, DateTimeVector, DateVector, IntervalDayTimeVector, + IntervalMonthDayNanoVector, IntervalYearMonthVector, ListVector, PrimitiveVector, StringVector, + TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, + TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector, + TimestampSecondVector, Vector, }; use crate::with_match_primitive_type_id; @@ -94,6 +97,17 @@ fn equal(lhs: &dyn Vector, rhs: &dyn Vector) -> bool { is_vector_eq!(TimestampNanosecondVector, lhs, rhs) } }, + Interval(v) => match v.unit() { + 
IntervalUnit::YearMonth => { + is_vector_eq!(IntervalYearMonthVector, lhs, rhs) + } + IntervalUnit::DayTime => { + is_vector_eq!(IntervalDayTimeVector, lhs, rhs) + } + IntervalUnit::MonthDayNano => { + is_vector_eq!(IntervalMonthDayNanoVector, lhs, rhs) + } + }, List(_) => is_vector_eq!(ListVector, lhs, rhs), UInt8(_) | UInt16(_) | UInt32(_) | UInt64(_) | Int8(_) | Int16(_) | Int32(_) | Int64(_) | Float32(_) | Float64(_) | Dictionary(_) => { @@ -198,6 +212,16 @@ mod tests { assert_vector_ref_eq(Arc::new(TimeMillisecondVector::from_values([100, 120]))); assert_vector_ref_eq(Arc::new(TimeMicrosecondVector::from_values([100, 120]))); assert_vector_ref_eq(Arc::new(TimeNanosecondVector::from_values([100, 120]))); + + assert_vector_ref_eq(Arc::new(IntervalYearMonthVector::from_values([ + 1000, 2000, 3000, 4000, + ]))); + assert_vector_ref_eq(Arc::new(IntervalDayTimeVector::from_values([ + 1000, 2000, 3000, 4000, + ]))); + assert_vector_ref_eq(Arc::new(IntervalMonthDayNanoVector::from_values([ + 1000, 2000, 3000, 4000, + ]))); } #[test] @@ -250,5 +274,18 @@ mod tests { Arc::new(TimeMicrosecondVector::from_values([100, 120])), Arc::new(TimeMicrosecondVector::from_values([200, 220])), ); + + assert_vector_ref_ne( + Arc::new(IntervalDayTimeVector::from_values([1000, 2000])), + Arc::new(IntervalDayTimeVector::from_values([2100, 1200])), + ); + assert_vector_ref_ne( + Arc::new(IntervalMonthDayNanoVector::from_values([1000, 2000])), + Arc::new(IntervalMonthDayNanoVector::from_values([2100, 1200])), + ); + assert_vector_ref_ne( + Arc::new(IntervalYearMonthVector::from_values([1000, 2000])), + Arc::new(IntervalYearMonthVector::from_values([2100, 1200])), + ); } } diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs index 8a7c45cf28e1..44a9a2cc7d5a 100644 --- a/src/datatypes/src/vectors/helper.rs +++ b/src/datatypes/src/vectors/helper.rs @@ -21,17 +21,19 @@ use arrow::array::{Array, ArrayRef, StringArray}; use arrow::compute; use arrow::compute::kernels::comparison; use arrow::datatypes::{DataType as ArrowDataType, TimeUnit}; +use arrow_schema::IntervalUnit; use datafusion_common::ScalarValue; use snafu::{OptionExt, ResultExt}; +use super::{IntervalDayTimeVector, IntervalYearMonthVector}; use crate::data_type::ConcreteDataType; use crate::error::{self, Result}; use crate::scalars::{Scalar, ScalarVectorBuilder}; use crate::value::{ListValue, ListValueRef}; use crate::vectors::{ BinaryVector, BooleanVector, ConstantVector, DateTimeVector, DateVector, Float32Vector, - Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, ListVector, - ListVectorBuilder, MutableVector, NullVector, StringVector, TimeMicrosecondVector, + Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalMonthDayNanoVector, + ListVector, ListVectorBuilder, MutableVector, NullVector, StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector, UInt32Vector, UInt64Vector, UInt8Vector, Vector, VectorRef, @@ -207,10 +209,16 @@ impl Helper { ScalarValue::Time64Nanosecond(v) => { ConstantVector::new(Arc::new(TimeNanosecondVector::from(vec![v])), length) } + ScalarValue::IntervalYearMonth(v) => { + ConstantVector::new(Arc::new(IntervalYearMonthVector::from(vec![v])), length) + } + ScalarValue::IntervalDayTime(v) => { + ConstantVector::new(Arc::new(IntervalDayTimeVector::from(vec![v])), length) + } + 
ScalarValue::IntervalMonthDayNano(v) => { + ConstantVector::new(Arc::new(IntervalMonthDayNanoVector::from(vec![v])), length) + } ScalarValue::Decimal128(_, _, _) - | ScalarValue::IntervalYearMonth(_) - | ScalarValue::IntervalDayTime(_) - | ScalarValue::IntervalMonthDayNano(_) | ScalarValue::Struct(_, _) | ScalarValue::Dictionary(_, _) => { return error::ConversionSnafu { @@ -286,9 +294,19 @@ impl Helper { } _ => unimplemented!("Arrow array datatype: {:?}", array.as_ref().data_type()), }, + ArrowDataType::Interval(unit) => match unit { + IntervalUnit::YearMonth => Arc::new( + IntervalYearMonthVector::try_from_arrow_interval_array(array)?, + ), + IntervalUnit::DayTime => { + Arc::new(IntervalDayTimeVector::try_from_arrow_interval_array(array)?) + } + IntervalUnit::MonthDayNano => Arc::new( + IntervalMonthDayNanoVector::try_from_arrow_interval_array(array)?, + ), + }, ArrowDataType::Float16 | ArrowDataType::Duration(_) - | ArrowDataType::Interval(_) | ArrowDataType::LargeList(_) | ArrowDataType::FixedSizeList(_, _) | ArrowDataType::Struct(_) @@ -330,7 +348,7 @@ mod tests { }; use arrow::datatypes::{Field, Int32Type}; use common_time::time::Time; - use common_time::{Date, DateTime}; + use common_time::{Date, DateTime, Interval}; use super::*; use crate::value::Value; @@ -478,4 +496,20 @@ mod tests { assert_eq!(Value::Time(Time::new_second(42)), vector.get(i)); } } + + #[test] + fn test_try_from_scalar_interval_value() { + let vector = + Helper::try_from_scalar_value(ScalarValue::IntervalMonthDayNano(Some(2000)), 3) + .unwrap(); + + assert_eq!( + ConcreteDataType::interval_month_day_nano_datatype(), + vector.data_type() + ); + assert_eq!(3, vector.len()); + for i in 0..vector.len() { + assert_eq!(Value::Interval(Interval::from_i128(2000)), vector.get(i)); + } + } } diff --git a/src/datatypes/src/vectors/interval.rs b/src/datatypes/src/vectors/interval.rs new file mode 100644 index 000000000000..cf619cfc9219 --- /dev/null +++ b/src/datatypes/src/vectors/interval.rs @@ -0,0 +1,25 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PrimitiveVector, PrimitiveVectorBuilder}; +use crate::types::{IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType}; + +pub type IntervalYearMonthVector = PrimitiveVector<IntervalYearMonthType>; +pub type IntervalYearMonthVectorBuilder = PrimitiveVectorBuilder<IntervalYearMonthType>; + +pub type IntervalMonthDayNanoVector = PrimitiveVector<IntervalMonthDayNanoType>; +pub type IntervalMonthDayNanoVectorBuilder = PrimitiveVectorBuilder<IntervalMonthDayNanoType>; + +pub type IntervalDayTimeVector = PrimitiveVector<IntervalDayTimeType>; +pub type IntervalDayTimeVectorBuilder = PrimitiveVectorBuilder<IntervalDayTimeType>; diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs index 3b61a8da0930..506a6bf9976b 100644 --- a/src/datatypes/src/vectors/primitive.rs +++ b/src/datatypes/src/vectors/primitive.rs @@ -23,6 +23,7 @@ use arrow::array::{ TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray, TimestampSecondArray, }; +use arrow_array::{IntervalDayTimeArray, IntervalMonthDayNanoArray, IntervalYearMonthArray}; use arrow_schema::DataType; use serde_json::Value as JsonValue; use snafu::OptionExt; @@ -159,6 +160,40 @@ impl<T: LogicalPrimitiveType> PrimitiveVector<T> { Ok(Self::new(concrete_array)) } + pub fn try_from_arrow_interval_array(array: impl AsRef<dyn Array>) -> Result<Self> { + let array = array.as_ref(); + let array_data = match array.data_type() { + DataType::Interval(unit) => match unit { + arrow_schema::IntervalUnit::YearMonth => array + .as_any() + .downcast_ref::<IntervalYearMonthArray>() + .unwrap() + .to_data(), + arrow_schema::IntervalUnit::DayTime => array + .as_any() + .downcast_ref::<IntervalDayTimeArray>() + .unwrap() + .to_data(), + arrow_schema::IntervalUnit::MonthDayNano => array + .as_any() + .downcast_ref::<IntervalMonthDayNanoArray>() + .unwrap() + .to_data(), + }, + arrow_type => { + return CastTypeSnafu { + msg: format!( + "Failed to cast arrow array {:?} to interval vector", + arrow_type, + ), + } + .fail()?; + } + }; + let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(array_data); + Ok(Self::new(concrete_array)) + } + pub fn from_slice<P: AsRef<[T::Native]>>(slice: P) -> Self { let iter = slice.as_ref().iter().copied(); Self { @@ -495,6 +530,7 @@ mod tests { Time64NanosecondArray, }; use arrow::datatypes::DataType as ArrowDataType; + use arrow_array::{IntervalDayTimeArray, IntervalYearMonthArray}; use serde_json; use super::*; @@ -502,9 +538,9 @@ mod tests { use crate::serialize::Serializable; use crate::types::Int64Type; use crate::vectors::{ - TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, - TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector, - TimestampSecondVector, + IntervalDayTimeVector, IntervalYearMonthVector, TimeMicrosecondVector, + TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector, + TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, }; fn check_vec(v: Int32Vector) { @@ -711,4 +747,28 @@ mod tests { let array: ArrayRef = Arc::new(Int32Array::from(vec![1i32, 2, 3])); assert!(TimestampSecondVector::try_from_arrow_timestamp_array(array).is_err()); } + + #[test] + fn test_try_from_arrow_interval_array() { + let array: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1000, 2000, 3000])); + let vector = IntervalYearMonthVector::try_from_arrow_interval_array(array).unwrap(); + assert_eq!( + 
IntervalYearMonthVector::from_values(vec![1000, 2000, 3000]), + vector + ); + + let array: ArrayRef = Arc::new(IntervalDayTimeArray::from(vec![1000, 2000, 3000])); + let vector = IntervalDayTimeVector::try_from_arrow_interval_array(array).unwrap(); + assert_eq!( + IntervalDayTimeVector::from_values(vec![1000, 2000, 3000]), + vector + ); + + let array: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1000, 2000, 3000])); + let vector = IntervalYearMonthVector::try_from_arrow_interval_array(array).unwrap(); + assert_eq!( + IntervalYearMonthVector::from_values(vec![1000, 2000, 3000]), + vector + ); + } } diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs index abacc17fb052..83a802dcd3e4 100644 --- a/src/servers/src/mysql/writer.rs +++ b/src/servers/src/mysql/writer.rs @@ -182,6 +182,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> { Value::DateTime(v) => row_writer.write_col(v.to_chrono_datetime())?, Value::Timestamp(v) => row_writer .write_col(v.to_timezone_aware_string(query_context.time_zone()))?, + Value::Interval(v) => row_writer.write_col(v.to_iso8601_string())?, Value::List(_) => { return Err(Error::Internal { err_msg: format!( @@ -241,6 +242,7 @@ pub(crate) fn create_mysql_column( ConcreteDataType::Time(_) => Ok(ColumnType::MYSQL_TYPE_TIME), ConcreteDataType::Date(_) => Ok(ColumnType::MYSQL_TYPE_DATE), ConcreteDataType::DateTime(_) => Ok(ColumnType::MYSQL_TYPE_DATETIME), + ConcreteDataType::Interval(_) => Ok(ColumnType::MYSQL_TYPE_VARCHAR), _ => error::InternalSnafu { err_msg: format!("not implemented for column datatype {:?}", data_type), } diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs index 28322b74b9f7..89ecec05b44b 100644 --- a/src/servers/src/postgres/types.rs +++ b/src/servers/src/postgres/types.rs @@ -97,12 +97,14 @@ pub(super) fn encode_value(value: &Value, builder: &mut DataRowEncoder) -> PgWir }))) } } - Value::List(_) => Err(PgWireError::ApiError(Box::new(Error::Internal { - err_msg: format!( - "cannot write value {:?} in postgres protocol: unimplemented", - &value - ), - }))), + Value::Interval(_) | Value::List(_) => { + Err(PgWireError::ApiError(Box::new(Error::Internal { + err_msg: format!( + "cannot write value {:?} in postgres protocol: unimplemented", + &value + ), + }))) + } } } @@ -122,6 +124,7 @@ pub(super) fn type_gt_to_pg(origin: &ConcreteDataType) -> Result<Type> { &ConcreteDataType::DateTime(_) => Ok(Type::TIMESTAMP), &ConcreteDataType::Timestamp(_) => Ok(Type::TIMESTAMP), &ConcreteDataType::Time(_) => Ok(Type::TIME), + &ConcreteDataType::Interval(_) => Ok(Type::INTERVAL), &ConcreteDataType::List(_) | &ConcreteDataType::Dictionary(_) => error::InternalSnafu { err_msg: format!("not implemented for column datatype {origin:?}"), } diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs index e9baa3c8ad4b..955c6cdedbd4 100644 --- a/src/sql/src/statements.rs +++ b/src/sql/src/statements.rs @@ -369,6 +369,7 @@ pub fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<Co })? 
.map(|t| ConcreteDataType::timestamp_datatype(t.unit())) .unwrap_or(ConcreteDataType::timestamp_millisecond_datatype())), + SqlDataType::Interval => Ok(ConcreteDataType::interval_month_day_nano_datatype()), _ => error::SqlTypeNotSupportedSnafu { t: data_type.clone(), } @@ -404,6 +405,7 @@ pub fn concrete_data_type_to_sql_data_type(data_type: &ConcreteDataType) -> Resu ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => { unreachable!() } + ConcreteDataType::Interval(_) => Ok(SqlDataType::Interval), } } @@ -499,7 +501,11 @@ mod tests { check_type( SqlDataType::Datetime(None), ConcreteDataType::datetime_datatype(), - ) + ); + check_type( + SqlDataType::Interval, + ConcreteDataType::interval_month_day_nano_datatype(), + ); } #[test] diff --git a/tests/cases/standalone/common/select/dummy.result b/tests/cases/standalone/common/select/dummy.result index 0887fdd7f8f3..fe0ef9853c9c 100644 --- a/tests/cases/standalone/common/select/dummy.result +++ b/tests/cases/standalone/common/select/dummy.result @@ -103,3 +103,27 @@ DROP TABLE test_unixtime; Affected Rows: 1 +select INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds 100 microseconds'; + ++---------------------------------------------------------+ +| IntervalMonthDayNano("1109194275255040973236744059552") | ++---------------------------------------------------------+ +| 0 years 14 mons 3 days 4 hours 5 mins 6.000100000 secs | ++---------------------------------------------------------+ + +select INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year'; + ++------------------------------------------------------------------------------------------------------------------+ +| IntervalMonthDayNano("1109194275255040972930743959552") + IntervalMonthDayNano("950737950171172051122527404032") | ++------------------------------------------------------------------------------------------------------------------+ +| 0 years 26 mons 3 days 4 hours 0 mins 0.000000000 secs | ++------------------------------------------------------------------------------------------------------------------+ + +select INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year'; + ++------------------------------------------------------------------------------------------------------------------+ +| IntervalMonthDayNano("1109194275255040972930743959552") - IntervalMonthDayNano("950737950171172051122527404032") | ++------------------------------------------------------------------------------------------------------------------+ +| 0 years 2 mons 3 days 4 hours 0 mins 0.000000000 secs | ++------------------------------------------------------------------------------------------------------------------+ + diff --git a/tests/cases/standalone/common/select/dummy.sql b/tests/cases/standalone/common/select/dummy.sql index 5bf16da86433..e4c10cdd748f 100644 --- a/tests/cases/standalone/common/select/dummy.sql +++ b/tests/cases/standalone/common/select/dummy.sql @@ -29,3 +29,9 @@ select b from test_unixtime; select TO_UNIXTIME(b) from test_unixtime; DROP TABLE test_unixtime; + +select INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds 100 microseconds'; + +select INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year'; + +select INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year';
feat
impl interval type (#1952)
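The interval commit recorded above threads `Interval` through values and vectors: new `Interval*Vector` aliases over `PrimitiveVector`, a `try_from_arrow_interval_array` constructor, scalar-value conversion in `Helper`, and interval handling in the MySQL/Postgres writers. As a minimal orientation, the sketch below mirrors the unit tests shown in the diff; the crate paths (`datatypes::vectors`, `arrow_array`) are assumed from the module layout the diff displays, not confirmed elsewhere.

```rust
// Minimal sketch mirroring the tests added in the interval commit.
// Assumption: the vector types are re-exported as `datatypes::vectors::*`,
// matching the `pub use interval::...` lines in the diff.
use std::sync::Arc;

use arrow_array::{ArrayRef, IntervalYearMonthArray};
use datatypes::vectors::{IntervalYearMonthVector, Vector};

fn main() {
    // Build an interval vector directly from i32 year-month values.
    let direct = IntervalYearMonthVector::from_values(vec![1000, 2000, 3000]);

    // Convert an Arrow interval array, as `Helper::try_from_array`
    // now does for `ArrowDataType::Interval(IntervalUnit::YearMonth)`.
    let array: ArrayRef = Arc::new(IntervalYearMonthArray::from(vec![1000, 2000, 3000]));
    let converted = IntervalYearMonthVector::try_from_arrow_interval_array(array).unwrap();

    assert_eq!(direct, converted);
    assert_eq!(3, converted.len());
}
```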
ae95f23e05246cce8fb402cd9cf648e07b85e34a
2023-10-10 13:10:16
JeremyHi
feat: add metrics for region server (#2552)
false
diff --git a/src/datanode/src/metrics.rs b/src/datanode/src/metrics.rs index 755b84a40d95..858506c89efa 100644 --- a/src/datanode/src/metrics.rs +++ b/src/datanode/src/metrics.rs @@ -14,5 +14,7 @@ //! datanode metrics -pub const HANDLE_SQL_ELAPSED: &str = "datanode.handle_sql_elapsed"; -pub const HANDLE_PROMQL_ELAPSED: &str = "datanode.handle_promql_elapsed"; +/// The elapsed time of handling a request in the region_server. +pub const HANDLE_REGION_REQUEST_ELAPSED: &str = "datanode.handle_region_request_elapsed"; +/// Region request type label. +pub const REGION_REQUEST_TYPE: &str = "datanode.region_request_type"; diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs index ef489499bfac..e073baa87e5e 100644 --- a/src/datanode/src/region_server.rs +++ b/src/datanode/src/region_server.rs @@ -28,7 +28,7 @@ use common_query::physical_plan::DfPhysicalPlanAdapter; use common_query::{DfPhysicalPlan, Output}; use common_recordbatch::SendableRecordBatchStream; use common_runtime::Runtime; -use common_telemetry::{info, warn}; +use common_telemetry::{info, timer, warn}; use dashmap::DashMap; use datafusion::catalog::schema::SchemaProvider; use datafusion::catalog::{CatalogList, CatalogProvider}; @@ -227,7 +227,11 @@ impl RegionServerInner { region_id: RegionId, request: RegionRequest, ) -> Result<Output> { - // TODO(ruihang): add some metrics + let request_type = request.request_type(); + let _timer = timer!( + crate::metrics::HANDLE_REGION_REQUEST_ELAPSED, + &[(crate::metrics::REGION_REQUEST_TYPE, request_type),] + ); let region_change = match &request { RegionRequest::Create(create) => RegionChange::Register(create.engine.clone()), diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs index 04251b81aeed..b6f06b790dab 100644 --- a/src/store-api/src/region_request.rs +++ b/src/store-api/src/region_request.rs @@ -43,6 +43,23 @@ pub enum RegionRequest { } impl RegionRequest { + /// Returns the type name of the [RegionRequest]. + #[inline] + pub fn request_type(&self) -> &'static str { + match &self { + RegionRequest::Put(_) => "put", + RegionRequest::Delete(_) => "delete", + RegionRequest::Create(_) => "create", + RegionRequest::Drop(_) => "drop", + RegionRequest::Open(_) => "open", + RegionRequest::Close(_) => "close", + RegionRequest::Alter(_) => "alter", + RegionRequest::Flush(_) => "flush", + RegionRequest::Compact(_) => "compact", + RegionRequest::Truncate(_) => "truncate", + } + } + /// Convert [Body](region_request::Body) to a group of [RegionRequest] with region id. /// Inserts/Deletes request might become multiple requests. Others are one-to-one. pub fn try_from_request_body(body: region_request::Body) -> Result<Vec<(RegionId, Self)>> {
feat
add metrics for region server (#2552)
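The metrics commit above pairs a new `request_type()` helper on `RegionRequest` with a labelled timer around request handling. Below is a rough sketch of that pattern in isolation; the constant values and the `timer!` invocation are copied from the diff, while the wrapper function and exact import paths are illustrative assumptions.

```rust
// Sketch of the labelled-timer pattern from the diff; `handle_request` is
// hypothetical, the macro usage and constants follow the diff.
use common_telemetry::timer;
use store_api::region_request::RegionRequest;

/// The elapsed time of handling a request in the region_server.
pub const HANDLE_REGION_REQUEST_ELAPSED: &str = "datanode.handle_region_request_elapsed";
/// Region request type label.
pub const REGION_REQUEST_TYPE: &str = "datanode.region_request_type";

fn handle_request(request: &RegionRequest) {
    // Maps each variant to a static label such as "put", "create" or "flush".
    let request_type = request.request_type();
    // The timer is labelled with the request type; elapsed time is recorded
    // when `_timer` goes out of scope (the usual guard-style pattern).
    let _timer = timer!(
        HANDLE_REGION_REQUEST_ELAPSED,
        &[(REGION_REQUEST_TYPE, request_type)]
    );
    // ... dispatch the request to the region engine here ...
}
```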
15912afd966bb4b4b43cacf8e55f2498f53f0cb4
2023-08-25 09:42:59
WU Jingdi
fix: the inconsistent order of input/output in range select (#2229)
false
diff --git a/src/query/src/range_select/plan.rs b/src/query/src/range_select/plan.rs index 26918b4f81a7..4ea5b940d420 100644 --- a/src/query/src/range_select/plan.rs +++ b/src/query/src/range_select/plan.rs @@ -84,6 +84,14 @@ pub struct RangeSelect { pub by: Vec<Expr>, pub schema: DFSchemaRef, pub by_schema: DFSchemaRef, + /// If the `schema` of the `RangeSelect` happens to be the same as the content of the upper-level Projection Plan, + /// the final output needs to be `project` through `schema_project`, + /// so that we can omit the upper-level Projection Plan. + pub schema_project: Option<Vec<usize>>, + /// The schema before run projection, follow the order of `range expr | time index | by columns` + /// `schema_before_project ---- schema_project ----> schema` + /// if `schema_project==None` then `schema_before_project==schema` + pub schema_before_project: DFSchemaRef, } impl RangeSelect { @@ -93,6 +101,7 @@ impl RangeSelect { align: Duration, time_index: Expr, by: Vec<Expr>, + projection_expr: &[Expr], ) -> Result<Self> { let mut fields = range_expr .iter() @@ -118,18 +127,54 @@ impl RangeSelect { let by_fields = exprlist_to_fields(by.iter().collect::<Vec<_>>(), &input).context(DataFusionSnafu)?; fields.extend(by_fields.clone()); - let schema = DFSchema::new_with_metadata(fields, input.schema().metadata().clone()) - .context(DataFusionSnafu)?; - let by_schema = DFSchema::new_with_metadata(by_fields, input.schema().metadata().clone()) - .context(DataFusionSnafu)?; + let schema_before_project = Arc::new( + DFSchema::new_with_metadata(fields, input.schema().metadata().clone()) + .context(DataFusionSnafu)?, + ); + let by_schema = Arc::new( + DFSchema::new_with_metadata(by_fields, input.schema().metadata().clone()) + .context(DataFusionSnafu)?, + ); + // If the result of the project plan happens to be the schema of the range plan, no project plan is required + // that need project is identical to range plan schema. + // 1. all exprs in project must belong to range schema + // 2. 
range schema and project exprs must have same size + let schema_project = projection_expr + .iter() + .map(|project_expr| { + if let Expr::Column(column) = project_expr { + schema_before_project + .index_of_column_by_name(column.relation.as_ref(), &column.name) + .unwrap_or(None) + .ok_or(()) + } else { + Err(()) + } + }) + .collect::<std::result::Result<Vec<usize>, ()>>() + .ok(); + let schema = if let Some(project) = &schema_project { + let project_field = project + .iter() + .map(|i| schema_before_project.fields()[*i].clone()) + .collect(); + Arc::new( + DFSchema::new_with_metadata(project_field, input.schema().metadata().clone()) + .context(DataFusionSnafu)?, + ) + } else { + schema_before_project.clone() + }; Ok(Self { input, range_expr, align, time_index: time_index_name, - schema: Arc::new(schema), - by_schema: Arc::new(by_schema), + schema, + by_schema, by, + schema_project, + schema_before_project, }) } } @@ -179,6 +224,8 @@ impl UserDefinedLogicalNodeCore for RangeSelect { schema: self.schema.clone(), by: self.by.clone(), by_schema: self.by_schema.clone(), + schema_project: self.schema_project.clone(), + schema_before_project: self.schema_before_project.clone(), } } } @@ -204,7 +251,7 @@ impl RangeSelect { session_state: &SessionState, ) -> DfResult<Arc<dyn ExecutionPlan>> { let fields: Vec<_> = self - .schema + .schema_before_project .fields() .iter() .map(|field| Field::new(field.name(), field.data_type().clone(), field.is_nullable())) @@ -270,6 +317,12 @@ impl RangeSelect { }) }) .collect::<DfResult<Vec<_>>>()?; + let schema_before_project = Arc::new(Schema::new(fields)); + let schema = if let Some(project) = &self.schema_project { + Arc::new(schema_before_project.project(project)?) + } else { + schema_before_project.clone() + }; Ok(Arc::new(RangeSelectExec { input: exec_input, range_exec, @@ -281,9 +334,11 @@ impl RangeSelect { session_state, )?, time_index: self.time_index.clone(), - schema: Arc::new(Schema::new(fields)), + schema, by_schema: Arc::new(Schema::new(by_fields)), metric: ExecutionPlanMetricsSet::new(), + schema_before_project, + schema_project: self.schema_project.clone(), })) } } @@ -305,6 +360,8 @@ pub struct RangeSelectExec { schema: SchemaRef, by_schema: SchemaRef, metric: ExecutionPlanMetricsSet, + schema_project: Option<Vec<usize>>, + schema_before_project: SchemaRef, } impl DisplayAs for RangeSelectExec { @@ -367,6 +424,8 @@ impl ExecutionPlan for RangeSelectExec { schema: self.schema.clone(), by_schema: self.by_schema.clone(), metric: self.metric.clone(), + schema_before_project: self.schema_before_project.clone(), + schema_project: self.schema_project.clone(), })) } @@ -405,6 +464,8 @@ impl ExecutionPlan for RangeSelectExec { row_converter, modify_map: HashMap::new(), metric: baseline_metric, + schema_project: self.schema_project.clone(), + schema_before_project: self.schema_before_project.clone(), })) } @@ -441,6 +502,8 @@ struct RangeSelectStream { /// The number of rows of the final output output_num_rows: usize, metric: BaselineMetrics, + schema_project: Option<Vec<usize>>, + schema_before_project: SchemaRef, } struct SeriesState { @@ -604,11 +667,20 @@ impl RangeSelectStream { columns.push(ScalarValue::iter_to_array(column_scalar)?); } let ts_column = ts_builder.finish(); - // output schema follow the order of range expr | time index | by columns - let ts_column = compute::cast(&ts_column, self.schema.field(columns.len()).data_type())?; + // output schema before project follow the order of range expr | time index | by columns + let ts_column = 
compute::cast( + &ts_column, + self.schema_before_project.field(columns.len()).data_type(), + )?; columns.push(ts_column); columns.extend(self.row_converter.convert_rows(by_rows)?); - Ok(RecordBatch::try_new(self.schema(), columns)?) + let output = RecordBatch::try_new(self.schema_before_project.clone(), columns)?; + let project_output = if let Some(project) = &self.schema_project { + output.project(project)? + } else { + output + }; + Ok(project_output) } } @@ -718,6 +790,12 @@ mod test { expected: String, ) { let memory_exec = Arc::new(prepare_test_data()); + let schema = Arc::new(Schema::new(vec![ + Field::new("MIN(value)", DataType::Int64, true), + Field::new("MAX(value)", DataType::Int64, true), + Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true), + Field::new("host", DataType::Utf8, true), + ])); let range_select_exec = Arc::new(RangeSelectExec { input: memory_exec, range_exec: vec![ @@ -743,12 +821,9 @@ mod test { align, by: vec![Arc::new(Column::new("host", 2))], time_index: TIME_INDEX_COLUMN.to_string(), - schema: Arc::new(Schema::new(vec![ - Field::new("MIN(value)", DataType::Int64, true), - Field::new("MAX(value)", DataType::Int64, true), - Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true), - Field::new("host", DataType::Utf8, true), - ])), + schema: schema.clone(), + schema_before_project: schema.clone(), + schema_project: None, by_schema: Arc::new(Schema::new(vec![Field::new("host", DataType::Utf8, true)])), metric: ExecutionPlanMetricsSet::new(), }); diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs index 89e9a908ce5e..b4d02daf76e0 100644 --- a/src/query/src/range_select/plan_rewrite.rs +++ b/src/query/src/range_select/plan_rewrite.rs @@ -205,28 +205,18 @@ impl RangePlanRewriter { if range_rewriter.by.is_empty() { range_rewriter.by = default_by; } + let range_select = RangeSelect::try_new( + input.clone(), + range_rewriter.range_fn, + range_rewriter.align, + time_index, + range_rewriter.by, + &new_expr, + )?; + let no_additional_project = range_select.schema_project.is_some(); let range_plan = LogicalPlan::Extension(Extension { - node: Arc::new(RangeSelect::try_new( - input.clone(), - range_rewriter.range_fn, - range_rewriter.align, - time_index, - range_rewriter.by, - )?), + node: Arc::new(range_select), }); - // If the result of the project plan happens to be the schema of the range plan, no project plan is required - // that need project is identical to range plan schema. - // 1. all exprs in project must belong to range schema - // 2. 
range schema and project exprs must have same size - let all_in_range_schema = new_expr.iter().all(|expr| { - if let Expr::Column(column) = expr { - range_plan.schema().has_column(column) - } else { - false - } - }); - let no_additional_project = - all_in_range_schema && new_expr.len() == range_plan.schema().fields().len(); if no_additional_project { Ok(Some(range_plan)) } else { @@ -417,7 +407,7 @@ mod test { async fn range_no_project() { let query = r#"SELECT timestamp, tag_0, tag_1, avg(field_0 + field_1) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#; let expected = String::from( - "RangeSelect: range_exprs=[RangeFn { expr:AVG(test.field_0 + test.field_1) range:300s fill: }], align=3600s time_index=timestamp [AVG(test.field_0 + test.field_1):Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ + "RangeSelect: range_exprs=[RangeFn { expr:AVG(test.field_0 + test.field_1) range:300s fill: }], align=3600s time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, AVG(test.field_0 + test.field_1):Float64;N]\ \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" ); query_plan_compare(query, expected).await; diff --git a/tests/cases/standalone/common/select/range_select.result b/tests/cases/standalone/common/select/range_select.result index e39746bdb2ef..202abea35e96 100644 --- a/tests/cases/standalone/common/select/range_select.result +++ b/tests/cases/standalone/common/select/range_select.result @@ -30,30 +30,30 @@ Affected Rows: 18 SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; -+---------------+---------------+---------------------+-------+ -| MIN(host.val) | MAX(host.val) | ts | host | -+---------------+---------------+---------------------+-------+ -| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 | -| 0.0 | 1.0 | 1970-01-01T00:00:05 | host1 | -| 1.0 | 2.0 | 1970-01-01T00:00:10 | host1 | -| 2.0 | 3.0 | 1970-01-01T00:00:15 | host1 | -| 3.0 | 4.0 | 1970-01-01T00:00:20 | host1 | -| 4.0 | 5.0 | 1970-01-01T00:00:25 | host1 | -| 5.0 | 6.0 | 1970-01-01T00:00:30 | host1 | -| 6.0 | 7.0 | 1970-01-01T00:00:35 | host1 | -| 7.0 | 8.0 | 1970-01-01T00:00:40 | host1 | -| 8.0 | 8.0 | 1970-01-01T00:00:45 | host1 | -| 9.0 | 9.0 | 1970-01-01T00:00:00 | host2 | -| 9.0 | 10.0 | 1970-01-01T00:00:05 | host2 | -| 10.0 | 11.0 | 1970-01-01T00:00:10 | host2 | -| 11.0 | 12.0 | 1970-01-01T00:00:15 | host2 | -| 12.0 | 13.0 | 1970-01-01T00:00:20 | host2 | -| 13.0 | 14.0 | 1970-01-01T00:00:25 | host2 | -| 14.0 | 15.0 | 1970-01-01T00:00:30 | host2 | -| 15.0 | 16.0 | 1970-01-01T00:00:35 | host2 | -| 16.0 | 17.0 | 1970-01-01T00:00:40 | host2 | -| 17.0 | 17.0 | 1970-01-01T00:00:45 | host2 | -+---------------+---------------+---------------------+-------+ ++---------------------+-------+---------------+---------------+ +| ts | host | MIN(host.val) | MAX(host.val) | ++---------------------+-------+---------------+---------------+ +| 1970-01-01T00:00:00 | host1 | 0.0 | 0.0 | +| 1970-01-01T00:00:05 | host1 | 0.0 | 1.0 | +| 1970-01-01T00:00:10 | host1 | 1.0 | 2.0 | +| 1970-01-01T00:00:15 | host1 | 2.0 | 3.0 | +| 1970-01-01T00:00:20 | host1 | 3.0 | 4.0 | +| 1970-01-01T00:00:25 | host1 | 4.0 | 5.0 | +| 1970-01-01T00:00:30 | host1 | 5.0 | 6.0 | +| 1970-01-01T00:00:35 | host1 | 6.0 | 7.0 | +| 1970-01-01T00:00:40 | host1 | 7.0 | 8.0 | +| 1970-01-01T00:00:45 | host1 | 8.0 | 
8.0 | +| 1970-01-01T00:00:00 | host2 | 9.0 | 9.0 | +| 1970-01-01T00:00:05 | host2 | 9.0 | 10.0 | +| 1970-01-01T00:00:10 | host2 | 10.0 | 11.0 | +| 1970-01-01T00:00:15 | host2 | 11.0 | 12.0 | +| 1970-01-01T00:00:20 | host2 | 12.0 | 13.0 | +| 1970-01-01T00:00:25 | host2 | 13.0 | 14.0 | +| 1970-01-01T00:00:30 | host2 | 14.0 | 15.0 | +| 1970-01-01T00:00:35 | host2 | 15.0 | 16.0 | +| 1970-01-01T00:00:40 | host2 | 16.0 | 17.0 | +| 1970-01-01T00:00:45 | host2 | 17.0 | 17.0 | ++---------------------+-------+---------------+---------------+ SELECT ts, host, min(val / 2.0)/2 RANGE '10s', max(val / 2.0)/2 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; @@ -82,122 +82,122 @@ SELECT ts, host, min(val / 2.0)/2 RANGE '10s', max(val / 2.0)/2 RANGE '10s' FROM | 1970-01-01T00:00:45 | host2 | 4.25 | 4.25 | +---------------------+-------+---------------------------------------+---------------------------------------+ -SELECT ts, host, covar(val, val) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; - -+-------------------------------+---------------------+-------+ -| COVARIANCE(host.val,host.val) | ts | host | -+-------------------------------+---------------------+-------+ -| | 1970-01-01T00:00:00 | host1 | -| 0.5 | 1970-01-01T00:00:05 | host1 | -| 0.5 | 1970-01-01T00:00:10 | host1 | -| 0.5 | 1970-01-01T00:00:15 | host1 | -| 0.5 | 1970-01-01T00:00:20 | host1 | -| 0.5 | 1970-01-01T00:00:25 | host1 | -| 0.5 | 1970-01-01T00:00:30 | host1 | -| 0.5 | 1970-01-01T00:00:35 | host1 | -| 0.5 | 1970-01-01T00:00:40 | host1 | -| | 1970-01-01T00:00:45 | host1 | -| | 1970-01-01T00:00:00 | host2 | -| 0.5 | 1970-01-01T00:00:05 | host2 | -| 0.5 | 1970-01-01T00:00:10 | host2 | -| 0.5 | 1970-01-01T00:00:15 | host2 | -| 0.5 | 1970-01-01T00:00:20 | host2 | -| 0.5 | 1970-01-01T00:00:25 | host2 | -| 0.5 | 1970-01-01T00:00:30 | host2 | -| 0.5 | 1970-01-01T00:00:35 | host2 | -| 0.5 | 1970-01-01T00:00:40 | host2 | -| | 1970-01-01T00:00:45 | host2 | -+-------------------------------+---------------------+-------+ - -SELECT ts, host, covar(sin(val), cos(val)) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; - -+-----------------------------------------+---------------------+-------+ -| COVARIANCE(sin(host.val),cos(host.val)) | ts | host | -+-----------------------------------------+---------------------+-------+ -| | 1970-01-01T00:00:00 | host1 | -| -0.1934111356975278 | 1970-01-01T00:00:05 | host1 | -| -0.032436271150495265 | 1970-01-01T00:00:10 | host1 | -| 0.2204076389548557 | 1970-01-01T00:00:15 | host1 | -| -0.15100761225328058 | 1970-01-01T00:00:20 | host1 | -| -0.09472495868737531 | 1970-01-01T00:00:25 | host1 | -| 0.2298465960529006 | 1970-01-01T00:00:30 | host1 | -| -0.09657490898971162 | 1970-01-01T00:00:35 | host1 | -| -0.1494679103211072 | 1970-01-01T00:00:40 | host1 | -| | 1970-01-01T00:00:45 | host1 | -| | 1970-01-01T00:00:00 | host2 | -| -0.03444910384248826 | 1970-01-01T00:00:05 | host2 | -| -0.19230433390872206 | 1970-01-01T00:00:10 | host2 | -| 0.19450278426332843 | 1970-01-01T00:00:15 | host2 | -| 0.03042089716713121 | 1970-01-01T00:00:20 | host2 | -| -0.21982190450538358 | 1970-01-01T00:00:25 | host2 | -| 0.1525354831602356 | 1970-01-01T00:00:30 | host2 | -| 0.09286758694873963 | 1970-01-01T00:00:35 | host2 | -| -0.22982858821320498 | 1970-01-01T00:00:40 | host2 | -| | 1970-01-01T00:00:45 | host2 | -+-----------------------------------------+---------------------+-------+ +SELECT ts, covar(val, val) RANGE '10s', host FROM host ALIGN '5s' ORDER BY host, ts; + 
++---------------------+-------------------------------+-------+ +| ts | COVARIANCE(host.val,host.val) | host | ++---------------------+-------------------------------+-------+ +| 1970-01-01T00:00:00 | | host1 | +| 1970-01-01T00:00:05 | 0.5 | host1 | +| 1970-01-01T00:00:10 | 0.5 | host1 | +| 1970-01-01T00:00:15 | 0.5 | host1 | +| 1970-01-01T00:00:20 | 0.5 | host1 | +| 1970-01-01T00:00:25 | 0.5 | host1 | +| 1970-01-01T00:00:30 | 0.5 | host1 | +| 1970-01-01T00:00:35 | 0.5 | host1 | +| 1970-01-01T00:00:40 | 0.5 | host1 | +| 1970-01-01T00:00:45 | | host1 | +| 1970-01-01T00:00:00 | | host2 | +| 1970-01-01T00:00:05 | 0.5 | host2 | +| 1970-01-01T00:00:10 | 0.5 | host2 | +| 1970-01-01T00:00:15 | 0.5 | host2 | +| 1970-01-01T00:00:20 | 0.5 | host2 | +| 1970-01-01T00:00:25 | 0.5 | host2 | +| 1970-01-01T00:00:30 | 0.5 | host2 | +| 1970-01-01T00:00:35 | 0.5 | host2 | +| 1970-01-01T00:00:40 | 0.5 | host2 | +| 1970-01-01T00:00:45 | | host2 | ++---------------------+-------------------------------+-------+ + +SELECT covar(ceil(val), floor(val)) RANGE '10s', ts, host FROM host ALIGN '5s' ORDER BY host, ts; + ++--------------------------------------------+---------------------+-------+ +| COVARIANCE(ceil(host.val),floor(host.val)) | ts | host | ++--------------------------------------------+---------------------+-------+ +| | 1970-01-01T00:00:00 | host1 | +| 0.5 | 1970-01-01T00:00:05 | host1 | +| 0.5 | 1970-01-01T00:00:10 | host1 | +| 0.5 | 1970-01-01T00:00:15 | host1 | +| 0.5 | 1970-01-01T00:00:20 | host1 | +| 0.5 | 1970-01-01T00:00:25 | host1 | +| 0.5 | 1970-01-01T00:00:30 | host1 | +| 0.5 | 1970-01-01T00:00:35 | host1 | +| 0.5 | 1970-01-01T00:00:40 | host1 | +| | 1970-01-01T00:00:45 | host1 | +| | 1970-01-01T00:00:00 | host2 | +| 0.5 | 1970-01-01T00:00:05 | host2 | +| 0.5 | 1970-01-01T00:00:10 | host2 | +| 0.5 | 1970-01-01T00:00:15 | host2 | +| 0.5 | 1970-01-01T00:00:20 | host2 | +| 0.5 | 1970-01-01T00:00:25 | host2 | +| 0.5 | 1970-01-01T00:00:30 | host2 | +| 0.5 | 1970-01-01T00:00:35 | host2 | +| 0.5 | 1970-01-01T00:00:40 | host2 | +| | 1970-01-01T00:00:45 | host2 | ++--------------------------------------------+---------------------+-------+ SELECT ts, host, covar((sin(val) + cos(val))/2.0 + 1.0, 2.0) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; -+--------------------------------------------------------------------------------+---------------------+-------+ -| COVARIANCE(sin(host.val) + cos(host.val) / Float64(2) + Float64(1),Float64(2)) | ts | host | -+--------------------------------------------------------------------------------+---------------------+-------+ -| | 1970-01-01T00:00:00 | host1 | -| 0.0 | 1970-01-01T00:00:05 | host1 | -| 0.0 | 1970-01-01T00:00:10 | host1 | -| 0.0 | 1970-01-01T00:00:15 | host1 | -| 0.0 | 1970-01-01T00:00:20 | host1 | -| 0.0 | 1970-01-01T00:00:25 | host1 | -| 0.0 | 1970-01-01T00:00:30 | host1 | -| 0.0 | 1970-01-01T00:00:35 | host1 | -| 0.0 | 1970-01-01T00:00:40 | host1 | -| | 1970-01-01T00:00:45 | host1 | -| | 1970-01-01T00:00:00 | host2 | -| 0.0 | 1970-01-01T00:00:05 | host2 | -| 0.0 | 1970-01-01T00:00:10 | host2 | -| 0.0 | 1970-01-01T00:00:15 | host2 | -| 0.0 | 1970-01-01T00:00:20 | host2 | -| 0.0 | 1970-01-01T00:00:25 | host2 | -| 0.0 | 1970-01-01T00:00:30 | host2 | -| 0.0 | 1970-01-01T00:00:35 | host2 | -| 0.0 | 1970-01-01T00:00:40 | host2 | -| | 1970-01-01T00:00:45 | host2 | -+--------------------------------------------------------------------------------+---------------------+-------+ - -SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host 
ALIGN '1000s' ORDER BY host, ts; - -+---------------+---------------+---------------------+-------+ -| MIN(host.val) | MAX(host.val) | ts | host | -+---------------+---------------+---------------------+-------+ -| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 | -| 9.0 | 9.0 | 1970-01-01T00:00:00 | host2 | -+---------------+---------------+---------------------+-------+ ++---------------------+-------+--------------------------------------------------------------------------------+ +| ts | host | COVARIANCE(sin(host.val) + cos(host.val) / Float64(2) + Float64(1),Float64(2)) | ++---------------------+-------+--------------------------------------------------------------------------------+ +| 1970-01-01T00:00:00 | host1 | | +| 1970-01-01T00:00:05 | host1 | 0.0 | +| 1970-01-01T00:00:10 | host1 | 0.0 | +| 1970-01-01T00:00:15 | host1 | 0.0 | +| 1970-01-01T00:00:20 | host1 | 0.0 | +| 1970-01-01T00:00:25 | host1 | 0.0 | +| 1970-01-01T00:00:30 | host1 | 0.0 | +| 1970-01-01T00:00:35 | host1 | 0.0 | +| 1970-01-01T00:00:40 | host1 | 0.0 | +| 1970-01-01T00:00:45 | host1 | | +| 1970-01-01T00:00:00 | host2 | | +| 1970-01-01T00:00:05 | host2 | 0.0 | +| 1970-01-01T00:00:10 | host2 | 0.0 | +| 1970-01-01T00:00:15 | host2 | 0.0 | +| 1970-01-01T00:00:20 | host2 | 0.0 | +| 1970-01-01T00:00:25 | host2 | 0.0 | +| 1970-01-01T00:00:30 | host2 | 0.0 | +| 1970-01-01T00:00:35 | host2 | 0.0 | +| 1970-01-01T00:00:40 | host2 | 0.0 | +| 1970-01-01T00:00:45 | host2 | | ++---------------------+-------+--------------------------------------------------------------------------------+ + +SELECT ts, min(val) RANGE '10s', host, max(val) RANGE '10s' FROM host ALIGN '1000s' ORDER BY host, ts; + ++---------------------+---------------+-------+---------------+ +| ts | MIN(host.val) | host | MAX(host.val) | ++---------------------+---------------+-------+---------------+ +| 1970-01-01T00:00:00 | 0.0 | host1 | 0.0 | +| 1970-01-01T00:00:00 | 9.0 | host2 | 9.0 | ++---------------------+---------------+-------+---------------+ SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts; -+---------------+---------------+---------------------+-------+ -| MIN(host.val) | MAX(host.val) | ts | host | -+---------------+---------------+---------------------+-------+ -| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 | -| 0.0 | 1.0 | 1970-01-01T00:00:05 | host1 | -| 1.0 | 2.0 | 1970-01-01T00:00:10 | host1 | -| 2.0 | 3.0 | 1970-01-01T00:00:15 | host1 | -| 3.0 | 4.0 | 1970-01-01T00:00:20 | host1 | -| 4.0 | 5.0 | 1970-01-01T00:00:25 | host1 | -| 5.0 | 6.0 | 1970-01-01T00:00:30 | host1 | -| 6.0 | 7.0 | 1970-01-01T00:00:35 | host1 | -| 7.0 | 8.0 | 1970-01-01T00:00:40 | host1 | -| 8.0 | | 1970-01-01T00:00:45 | host1 | -| 9.0 | 9.0 | 1970-01-01T00:00:00 | host2 | -| 9.0 | 10.0 | 1970-01-01T00:00:05 | host2 | -| 10.0 | 11.0 | 1970-01-01T00:00:10 | host2 | -| 11.0 | 12.0 | 1970-01-01T00:00:15 | host2 | -| 12.0 | 13.0 | 1970-01-01T00:00:20 | host2 | -| 13.0 | 14.0 | 1970-01-01T00:00:25 | host2 | -| 14.0 | 15.0 | 1970-01-01T00:00:30 | host2 | -| 15.0 | 16.0 | 1970-01-01T00:00:35 | host2 | -| 16.0 | 17.0 | 1970-01-01T00:00:40 | host2 | -| 17.0 | | 1970-01-01T00:00:45 | host2 | -+---------------+---------------+---------------------+-------+ ++---------------------+-------+---------------+---------------+ +| ts | host | MIN(host.val) | MAX(host.val) | ++---------------------+-------+---------------+---------------+ +| 1970-01-01T00:00:00 | host1 | 0.0 | 0.0 | +| 1970-01-01T00:00:05 | host1 | 0.0 | 1.0 | +| 1970-01-01T00:00:10 | 
host1 | 1.0 | 2.0 | +| 1970-01-01T00:00:15 | host1 | 2.0 | 3.0 | +| 1970-01-01T00:00:20 | host1 | 3.0 | 4.0 | +| 1970-01-01T00:00:25 | host1 | 4.0 | 5.0 | +| 1970-01-01T00:00:30 | host1 | 5.0 | 6.0 | +| 1970-01-01T00:00:35 | host1 | 6.0 | 7.0 | +| 1970-01-01T00:00:40 | host1 | 7.0 | 8.0 | +| 1970-01-01T00:00:45 | host1 | 8.0 | | +| 1970-01-01T00:00:00 | host2 | 9.0 | 9.0 | +| 1970-01-01T00:00:05 | host2 | 9.0 | 10.0 | +| 1970-01-01T00:00:10 | host2 | 10.0 | 11.0 | +| 1970-01-01T00:00:15 | host2 | 11.0 | 12.0 | +| 1970-01-01T00:00:20 | host2 | 12.0 | 13.0 | +| 1970-01-01T00:00:25 | host2 | 13.0 | 14.0 | +| 1970-01-01T00:00:30 | host2 | 14.0 | 15.0 | +| 1970-01-01T00:00:35 | host2 | 15.0 | 16.0 | +| 1970-01-01T00:00:40 | host2 | 16.0 | 17.0 | +| 1970-01-01T00:00:45 | host2 | 17.0 | | ++---------------------+-------+---------------+---------------+ SELECT ts, host, (min(val)+max(val))/4 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; @@ -318,30 +318,30 @@ Affected Rows: 18 SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host_sec ALIGN '5s' ORDER BY host, ts; -+-------------------+-------------------+---------------------+-------+ -| MIN(host_sec.val) | MAX(host_sec.val) | ts | host | -+-------------------+-------------------+---------------------+-------+ -| 0.0 | 0.0 | 1970-01-01T00:00:00 | host1 | -| 0.0 | 1.0 | 1970-01-01T00:00:05 | host1 | -| 1.0 | 2.0 | 1970-01-01T00:00:10 | host1 | -| 2.0 | 3.0 | 1970-01-01T00:00:15 | host1 | -| 3.0 | 4.0 | 1970-01-01T00:00:20 | host1 | -| 4.0 | 5.0 | 1970-01-01T00:00:25 | host1 | -| 5.0 | 6.0 | 1970-01-01T00:00:30 | host1 | -| 6.0 | 7.0 | 1970-01-01T00:00:35 | host1 | -| 7.0 | 8.0 | 1970-01-01T00:00:40 | host1 | -| 8.0 | 8.0 | 1970-01-01T00:00:45 | host1 | -| 9.0 | 9.0 | 1970-01-01T00:00:00 | host2 | -| 9.0 | 10.0 | 1970-01-01T00:00:05 | host2 | -| 10.0 | 11.0 | 1970-01-01T00:00:10 | host2 | -| 11.0 | 12.0 | 1970-01-01T00:00:15 | host2 | -| 12.0 | 13.0 | 1970-01-01T00:00:20 | host2 | -| 13.0 | 14.0 | 1970-01-01T00:00:25 | host2 | -| 14.0 | 15.0 | 1970-01-01T00:00:30 | host2 | -| 15.0 | 16.0 | 1970-01-01T00:00:35 | host2 | -| 16.0 | 17.0 | 1970-01-01T00:00:40 | host2 | -| 17.0 | 17.0 | 1970-01-01T00:00:45 | host2 | -+-------------------+-------------------+---------------------+-------+ ++---------------------+-------+-------------------+-------------------+ +| ts | host | MIN(host_sec.val) | MAX(host_sec.val) | ++---------------------+-------+-------------------+-------------------+ +| 1970-01-01T00:00:00 | host1 | 0.0 | 0.0 | +| 1970-01-01T00:00:05 | host1 | 0.0 | 1.0 | +| 1970-01-01T00:00:10 | host1 | 1.0 | 2.0 | +| 1970-01-01T00:00:15 | host1 | 2.0 | 3.0 | +| 1970-01-01T00:00:20 | host1 | 3.0 | 4.0 | +| 1970-01-01T00:00:25 | host1 | 4.0 | 5.0 | +| 1970-01-01T00:00:30 | host1 | 5.0 | 6.0 | +| 1970-01-01T00:00:35 | host1 | 6.0 | 7.0 | +| 1970-01-01T00:00:40 | host1 | 7.0 | 8.0 | +| 1970-01-01T00:00:45 | host1 | 8.0 | 8.0 | +| 1970-01-01T00:00:00 | host2 | 9.0 | 9.0 | +| 1970-01-01T00:00:05 | host2 | 9.0 | 10.0 | +| 1970-01-01T00:00:10 | host2 | 10.0 | 11.0 | +| 1970-01-01T00:00:15 | host2 | 11.0 | 12.0 | +| 1970-01-01T00:00:20 | host2 | 12.0 | 13.0 | +| 1970-01-01T00:00:25 | host2 | 13.0 | 14.0 | +| 1970-01-01T00:00:30 | host2 | 14.0 | 15.0 | +| 1970-01-01T00:00:35 | host2 | 15.0 | 16.0 | +| 1970-01-01T00:00:40 | host2 | 16.0 | 17.0 | +| 1970-01-01T00:00:45 | host2 | 17.0 | 17.0 | ++---------------------+-------+-------------------+-------------------+ DROP TABLE host_sec; diff --git 
a/tests/cases/standalone/common/select/range_select.sql b/tests/cases/standalone/common/select/range_select.sql index ef472e924203..016c91392d78 100644 --- a/tests/cases/standalone/common/select/range_select.sql +++ b/tests/cases/standalone/common/select/range_select.sql @@ -28,13 +28,13 @@ SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host ALIGN '5s' SELECT ts, host, min(val / 2.0)/2 RANGE '10s', max(val / 2.0)/2 RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; -SELECT ts, host, covar(val, val) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; +SELECT ts, covar(val, val) RANGE '10s', host FROM host ALIGN '5s' ORDER BY host, ts; -SELECT ts, host, covar(sin(val), cos(val)) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; +SELECT covar(ceil(val), floor(val)) RANGE '10s', ts, host FROM host ALIGN '5s' ORDER BY host, ts; SELECT ts, host, covar((sin(val) + cos(val))/2.0 + 1.0, 2.0) RANGE '10s' FROM host ALIGN '5s' ORDER BY host, ts; -SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '10s' FROM host ALIGN '1000s' ORDER BY host, ts; +SELECT ts, min(val) RANGE '10s', host, max(val) RANGE '10s' FROM host ALIGN '1000s' ORDER BY host, ts; SELECT ts, host, min(val) RANGE '10s', max(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
fix
the inconsistent order of input/output in range select (#2229)
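The range-select fix above records a `schema_project` mapping so the plan can emit columns in the order the query asked for, instead of the fixed `range expr | time index | by columns` order. The toy function below only illustrates that index computation with plain strings; it is not the crate's real code, which operates on `DFSchema` and `Expr`, and every name in it is hypothetical.

```rust
// Illustration only: a string-based stand-in for the `schema_project`
// computation in `RangeSelect::try_new`.
fn compute_schema_project(
    projection: &[&str],
    schema_before_project: &[&str],
) -> Option<Vec<usize>> {
    // Every projected expression must be a plain column found in the
    // pre-projection schema; otherwise the mapping is `None`.
    projection
        .iter()
        .map(|name| schema_before_project.iter().position(|field| field == name))
        .collect()
}

fn main() {
    // Pre-projection order: range exprs, then time index, then by columns.
    let before = ["MIN(host.val)", "MAX(host.val)", "ts", "host"];

    // `SELECT ts, host, min(val) RANGE ..., max(val) RANGE ...` reorders the
    // output to `ts, host, ...`, matching the updated .result files above.
    let project =
        compute_schema_project(&["ts", "host", "MIN(host.val)", "MAX(host.val)"], &before);
    assert_eq!(project, Some(vec![2, 3, 0, 1]));

    // A non-column (or unknown) expression means the mapping is `None`
    // and an extra Projection plan is still required.
    assert_eq!(compute_schema_project(&["ts", "host.val + 1"], &before), None);
}
```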