Commit Hash | Author | Date | Description | Body | Footers | Commit Message | Git Diff |
|---|---|---|---|---|---|---|---|
c3e9016b74964ec4e73e671008ce1f6273a827ad
|
Dom Dwyer
|
2023-02-17 23:55:16
|
rename ingester QueryExec metric
|
This metric records the duration of time spent gathering the partitions
for a query, and not the time spent returning results.
| null |
refactor: rename ingester QueryExec metric
This metric records the duration of time spent gathering the partitions
for a query, and not the time spent returning results.
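(Illustrative note, not part of the commit: a minimal sketch of what the renamed metric measures, using only std timing rather than the IOx `metric` crate — the observed duration covers gathering the partitions and stops before any results are streamed back.)

```rust
// Sketch only: the duration ends once partitions are gathered, before streaming.
use std::time::{Duration, Instant};

fn gather_partitions() -> Vec<u32> {
    vec![1, 2, 3] // stand-in for partition selection
}

fn main() {
    let start = Instant::now();
    let partitions = gather_partitions();
    let query_exec_duration: Duration = start.elapsed(); // what the metric observes

    // Streaming the results happens afterwards and is NOT included.
    for p in &partitions {
        println!("streaming partition {p}");
    }
    println!("ingester_query_exec_duration = {query_exec_duration:?}");
}
```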
|
diff --git a/ingester2/src/init.rs b/ingester2/src/init.rs
index e1ac17d957..81cf6acb2e 100644
--- a/ingester2/src/init.rs
+++ b/ingester2/src/init.rs
@@ -37,7 +37,7 @@ use crate::{
completion_observer::NopObserver, handle::PersistHandle,
hot_partitions::HotPartitionPersister,
},
- query::{instrumentation::QueryExecInstrumentation, tracing::QueryExecTracing},
+ query::{exec_instrumentation::QueryExecInstrumentation, tracing::QueryExecTracing},
server::grpc::GrpcDelegate,
timestamp_oracle::TimestampOracle,
wal::{rotate_task::periodic_rotation, wal_sink::WalSink},
diff --git a/ingester2/src/query/instrumentation.rs b/ingester2/src/query/exec_instrumentation.rs
similarity index 96%
rename from ingester2/src/query/instrumentation.rs
rename to ingester2/src/query/exec_instrumentation.rs
index bd85aeb998..4a05367d81 100644
--- a/ingester2/src/query/instrumentation.rs
+++ b/ingester2/src/query/exec_instrumentation.rs
@@ -1,3 +1,5 @@
+//! Instrumentation of [`QueryExec`] implementers.
+
use async_trait::async_trait;
use data_types::{NamespaceId, TableId};
use iox_time::{SystemProvider, TimeProvider};
@@ -27,8 +29,8 @@ impl<T> QueryExecInstrumentation<T> {
pub(crate) fn new(name: &'static str, inner: T, metrics: &metric::Registry) -> Self {
// Record query duration metrics, broken down by query execution result
let query_duration: Metric<DurationHistogram> = metrics.register_metric(
- "ingester_flight_query_duration",
- "flight request query execution duration",
+ "ingester_query_exec_duration",
+ "duration of time spent selecting partitions for a query",
);
let query_duration_success =
query_duration.recorder(&[("handler", name), ("result", "success")]);
@@ -118,7 +120,7 @@ mod tests {
// Validate the histogram with the specified attributes saw
// an observation
let histogram = metrics
- .get_instrument::<Metric<DurationHistogram>>("ingester_flight_query_duration")
+ .get_instrument::<Metric<DurationHistogram>>("ingester_query_exec_duration")
.expect("failed to find metric")
.get_observer(&Attributes::from(&$want_metric_attr))
.expect("failed to find attributes")
diff --git a/ingester2/src/query/mod.rs b/ingester2/src/query/mod.rs
index 14c9b8f7eb..34262518cd 100644
--- a/ingester2/src/query/mod.rs
+++ b/ingester2/src/query/mod.rs
@@ -8,7 +8,7 @@ pub(crate) mod partition_response;
pub(crate) mod response;
// Instrumentation
-pub(crate) mod instrumentation;
+pub(crate) mod exec_instrumentation;
pub(crate) mod tracing;
#[cfg(test)]
|
7e31b2638d2ea6f08d98982b658c407b0c359d2e
|
Andrew Lamb
|
2023-02-23 00:43:31
|
Understandable compactor2 config report (#7028)
|
* fix: Understandable compactor2 config report
* fix: do not log postgres dsn
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
fix: Understandable compactor2 config report (#7028)
* fix: Understandable compactor2 config report
* fix: do not log postgres dsn
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
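(Illustrative sketch, not part of the commit: the pattern the diff below applies — a hand-written `Display` impl that deliberately omits the DSN so credentials cannot leak into the config log line. The type and field names here are hypothetical stand-ins.)

```rust
use std::fmt;

// Hypothetical config type standing in for the catalog types in the diff.
struct PostgresConfig {
    dsn: String, // may embed credentials, so it must never be logged
    schema_name: String,
}

impl fmt::Display for PostgresConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Do not include the dsn: it may contain a password.
        write!(f, "Postgres(dsn=OMITTED, schema_name='{}')", self.schema_name)
    }
}

fn main() {
    let cfg = PostgresConfig {
        dsn: "postgres://user:secret@localhost/iox".to_string(),
        schema_name: "iox_catalog".to_string(),
    };
    // Logging via `Display` (`%cfg` in tracing macros) keeps the secret out.
    println!("{cfg}");
    assert!(!format!("{cfg}").contains(&cfg.dsn));
}
```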
|
diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs
index 8793b4151b..27c714fd96 100644
--- a/compactor2/src/components/report.rs
+++ b/compactor2/src/components/report.rs
@@ -11,7 +11,8 @@ pub fn log_config(config: &Config) {
// use struct unpack so we don't forget any members
let Config {
shard_id,
- metric_registry,
+ // no need to print the internal state of the registry
+ metric_registry: _,
catalog,
parquet_store_real,
parquet_store_scratchpad,
@@ -55,12 +56,11 @@ pub fn log_config(config: &Config) {
.unwrap_or("None");
info!(
shard_id=shard_id.get(),
- ?metric_registry,
- ?catalog,
- ?parquet_store_real,
- ?parquet_store_scratchpad,
- ?exec,
- ?time_provider,
+ %catalog,
+ %parquet_store_real,
+ %parquet_store_scratchpad,
+ %exec,
+ %time_provider,
?backoff_config,
partition_concurrency=partition_concurrency.get(),
job_concurrency=job_concurrency.get(),
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index d0ee15005f..6bf8a165fa 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -12,7 +12,7 @@ use iox_time::TimeProvider;
use snafu::{OptionExt, Snafu};
use std::{
collections::{BTreeMap, HashMap, HashSet},
- fmt::Debug,
+ fmt::{Debug, Display},
sync::Arc,
};
use uuid::Uuid;
@@ -198,7 +198,7 @@ impl SoftDeletedRows {
/// Methods for working with the catalog.
#[async_trait]
-pub trait Catalog: Send + Sync + Debug {
+pub trait Catalog: Send + Sync + Debug + Display {
/// Setup catalog for usage and apply possible migrations.
async fn setup(&self) -> Result<(), Error>;
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index 155e8fbbff..4e196960e3 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -26,7 +26,7 @@ use sqlx::types::Uuid;
use std::{
collections::{HashMap, HashSet},
convert::TryFrom,
- fmt::Formatter,
+ fmt::{Display, Formatter},
sync::Arc,
};
use tokio::sync::{Mutex, OwnedMutexGuard};
@@ -111,6 +111,12 @@ impl Drop for MemTxn {
}
}
+impl Display for MemCatalog {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Memory")
+ }
+}
+
#[async_trait]
impl Catalog for MemCatalog {
async fn setup(&self) -> Result<(), Error> {
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 015969a115..0548fd2be3 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -28,7 +28,7 @@ use sqlx::{
Acquire, ConnectOptions, Executor, Postgres, Row,
};
use sqlx_hotswap_pool::HotSwapPool;
-use std::{collections::HashMap, str::FromStr, sync::Arc, time::Duration};
+use std::{collections::HashMap, fmt::Display, str::FromStr, sync::Arc, time::Duration};
static MIGRATOR: Migrator = sqlx::migrate!();
@@ -101,8 +101,9 @@ impl Default for PostgresConnectionOptions {
pub struct PostgresCatalog {
metrics: Arc<metric::Registry>,
pool: HotSwapPool<Postgres>,
- schema_name: String,
time_provider: Arc<dyn TimeProvider>,
+ // Connection options for display
+ options: PostgresConnectionOptions,
}
// struct to get return value from "select count(id) ..." query
@@ -121,14 +122,29 @@ impl PostgresCatalog {
.await
.map_err(|e| Error::SqlxError { source: e })?;
- let schema_name = options.schema_name;
Ok(Self {
pool,
metrics,
- schema_name,
time_provider: Arc::new(SystemProvider::new()),
+ options,
})
}
+
+ fn schema_name(&self) -> &str {
+ &self.options.schema_name
+ }
+}
+
+impl Display for PostgresCatalog {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ // Do not include dsn in log as it may have credentials
+ // that should not end up in the log
+ "Postgres(dsn=OMITTED, schema_name='{}')",
+ self.schema_name()
+ )
+ }
}
/// transaction for [`PostgresCatalog`].
@@ -281,7 +297,7 @@ impl Catalog for PostgresCatalog {
//
// This makes the migrations/20210217134322_create_schema.sql step unnecessary; we need to
// keep that file because migration files are immutable.
- let create_schema_query = format!("CREATE SCHEMA IF NOT EXISTS {};", &self.schema_name);
+ let create_schema_query = format!("CREATE SCHEMA IF NOT EXISTS {};", self.schema_name());
self.pool
.execute(sqlx::query(&create_schema_query))
.await
@@ -2592,7 +2608,7 @@ mod tests {
assert_eq!(tz, "UTC");
let pool = postgres.pool.clone();
- let schema_name = postgres.schema_name.clone();
+ let schema_name = postgres.schema_name().to_string();
let postgres: Arc<dyn Catalog> = Arc::new(postgres);
diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs
index c925fe25ff..8928d0c8e8 100644
--- a/iox_catalog/src/sqlite.rs
+++ b/iox_catalog/src/sqlite.rs
@@ -19,8 +19,8 @@ use data_types::{
Tombstone, TombstoneId, TopicId, TopicMetadata, TRANSITION_SHARD_ID, TRANSITION_SHARD_INDEX,
};
use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
use std::ops::Deref;
+use std::{collections::HashMap, fmt::Display};
use iox_time::{SystemProvider, TimeProvider};
use metric::Registry;
@@ -53,6 +53,7 @@ pub struct SqliteCatalog {
metrics: Arc<Registry>,
pool: Pool<Sqlite>,
time_provider: Arc<dyn TimeProvider>,
+ options: SqliteConnectionOptions,
}
// struct to get return value from "select count(id) ..." query
@@ -212,10 +213,17 @@ impl SqliteCatalog {
metrics,
pool,
time_provider: Arc::new(SystemProvider::new()),
+ options,
})
}
}
+impl Display for SqliteCatalog {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Sqlite(dsn='{}')", self.options.dsn)
+ }
+}
+
#[async_trait]
impl Catalog for SqliteCatalog {
async fn setup(&self) -> Result<()> {
diff --git a/iox_query/src/exec.rs b/iox_query/src/exec.rs
index 13433dc280..269732d3b6 100644
--- a/iox_query/src/exec.rs
+++ b/iox_query/src/exec.rs
@@ -17,7 +17,7 @@ use parquet_file::storage::StorageId;
use trace::span::{SpanExt, SpanRecorder};
mod cross_rt_stream;
-use std::{collections::HashMap, sync::Arc};
+use std::{collections::HashMap, fmt::Display, sync::Arc};
use datafusion::{
self,
@@ -52,6 +52,16 @@ pub struct ExecutorConfig {
pub mem_pool_size: usize,
}
+impl Display for ExecutorConfig {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "num_threads={}, target_query_partitions={}, mem_pool_size={}",
+ self.num_threads, self.target_query_partitions, self.mem_pool_size
+ )
+ }
+}
+
#[derive(Debug)]
pub struct DedicatedExecutors {
/// Executor for running user queries
@@ -105,6 +115,12 @@ pub struct Executor {
runtime: Arc<RuntimeEnv>,
}
+impl Display for Executor {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Executor({})", self.config)
+ }
+}
+
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExecutorType {
/// Run using the pool for queries
diff --git a/iox_time/src/lib.rs b/iox_time/src/lib.rs
index 55520b0dde..2fc93a0782 100644
--- a/iox_time/src/lib.rs
+++ b/iox_time/src/lib.rs
@@ -12,6 +12,7 @@
use chrono::{DateTime, TimeZone, Timelike, Utc};
use parking_lot::{lock_api::RwLockUpgradableReadGuard, RwLock};
use std::{
+ fmt::{Debug, Display},
future::Future,
ops::{Add, Sub},
pin::Pin,
@@ -62,7 +63,7 @@ impl Sub<Self> for Time {
}
}
-impl std::fmt::Debug for Time {
+impl Debug for Time {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
@@ -181,7 +182,7 @@ impl Time {
}
}
-pub trait TimeProvider: std::fmt::Debug + Send + Sync + 'static {
+pub trait TimeProvider: Debug + Display + Send + Sync + 'static {
/// Returns the current `Time`. No guarantees are made about monotonicity
fn now(&self) -> Time;
@@ -222,6 +223,12 @@ impl SystemProvider {
}
}
+impl Display for SystemProvider {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "System")
+ }
+}
+
impl TimeProvider for SystemProvider {
fn now(&self) -> Time {
Time(Utc::now())
@@ -279,6 +286,12 @@ impl MockProvider {
}
}
+impl Display for MockProvider {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Mock")
+ }
+}
+
impl TimeProvider for MockProvider {
fn now(&self) -> Time {
self.inner.read().now
diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs
index 9a7704b3e9..d407566d7d 100644
--- a/parquet_file/src/storage.rs
+++ b/parquet_file/src/storage.rs
@@ -26,6 +26,7 @@ use object_store::{DynObjectStore, ObjectMeta};
use observability_deps::tracing::*;
use schema::Projection;
use std::{
+ fmt::Display,
sync::Arc,
time::{Duration, Instant},
};
@@ -170,6 +171,16 @@ pub struct ParquetStorage {
id: StorageId,
}
+impl Display for ParquetStorage {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "ParquetStorage(id={:?}, object_store={}",
+ self.id, self.object_store
+ )
+ }
+}
+
impl ParquetStorage {
/// Initialise a new [`ParquetStorage`] using `object_store` as the
/// persistence layer.
|
bda2310ca1a5ac385a88f623a8bbe7f4c9b96f39
|
Marco Neumann
|
2023-02-17 12:41:39
|
extract chunks from phys. plan (#7018)
|
* feat: extract chunks from phys. plan
For #6098.
* test: ensure that `extract_chunks` does NOT scan through other nodes
| null |
feat: extract chunks from phys. plan (#7018)
* feat: extract chunks from phys. plan
For #6098.
* test: ensure that `extract_chunks` does NOT scan through other nodes
|
diff --git a/iox_query/src/physical_optimizer/chunk_extraction.rs b/iox_query/src/physical_optimizer/chunk_extraction.rs
new file mode 100644
index 0000000000..d891c78f19
--- /dev/null
+++ b/iox_query/src/physical_optimizer/chunk_extraction.rs
@@ -0,0 +1,233 @@
+use std::sync::Arc;
+
+use datafusion::physical_plan::{
+ empty::EmptyExec, file_format::ParquetExec, union::UnionExec, visit_execution_plan,
+ ExecutionPlan, ExecutionPlanVisitor,
+};
+use schema::Schema;
+
+use crate::{
+ provider::{PartitionedFileExt, RecordBatchesExec},
+ QueryChunk,
+};
+
+/// Extract chunks and schema from plans created with [`chunks_to_physical_nodes`].
+///
+/// Returns `None` if no chunks (or an [`EmptyExec`] in case that no chunks where passed to
+/// [`chunks_to_physical_nodes`]) were found or if the chunk data is inconsistent.
+///
+/// Note that this only works on the direct output of [`chunks_to_physical_nodes`]. If the plan is wrapped into
+/// additional nodes (like de-duplication, filtering, projection) then NO data will be returned.
+///
+/// [`chunks_to_physical_nodes`]: crate::provider::chunks_to_physical_nodes
+#[allow(dead_code)]
+pub fn extract_chunks(plan: &dyn ExecutionPlan) -> Option<(Schema, Vec<Arc<dyn QueryChunk>>)> {
+ let mut visitor = ExtractChunksVisitor::default();
+ visit_execution_plan(plan, &mut visitor).ok()?;
+ visitor.schema.map(|schema| (schema, visitor.chunks))
+}
+
+#[derive(Debug, Default)]
+struct ExtractChunksVisitor {
+ chunks: Vec<Arc<dyn QueryChunk>>,
+ schema: Option<Schema>,
+}
+
+impl ExtractChunksVisitor {
+ fn add_schema(&mut self, schema: &Schema) -> Result<(), ()> {
+ if let Some(existing) = &self.schema {
+ if existing != schema {
+ return Err(());
+ }
+ } else {
+ self.schema = Some(schema.clone());
+ }
+
+ Ok(())
+ }
+
+ fn add_chunk(&mut self, chunk: Arc<dyn QueryChunk>) -> Result<(), ()> {
+ self.add_schema(chunk.schema())?;
+ self.chunks.push(chunk);
+ Ok(())
+ }
+}
+
+impl ExecutionPlanVisitor for ExtractChunksVisitor {
+ type Error = ();
+
+ fn pre_visit(&mut self, plan: &dyn ExecutionPlan) -> Result<bool, Self::Error> {
+ let plan_any = plan.as_any();
+
+ if let Some(record_batches_exec) = plan_any.downcast_ref::<RecordBatchesExec>() {
+ for chunk in record_batches_exec.chunks() {
+ self.add_chunk(Arc::clone(chunk))?;
+ }
+ } else if let Some(parquet_exec) = plan_any.downcast_ref::<ParquetExec>() {
+ for group in &parquet_exec.base_config().file_groups {
+ for file in group {
+ let ext = file
+ .extensions
+ .as_ref()
+ .and_then(|any| any.downcast_ref::<PartitionedFileExt>())
+ .ok_or(())?;
+ self.add_chunk(Arc::clone(&ext.0))?;
+ }
+ }
+ } else if let Some(empty_exec) = plan_any.downcast_ref::<EmptyExec>() {
+ // should not produce dummy data
+ if empty_exec.produce_one_row() {
+ return Err(());
+ }
+
+ let schema = Schema::try_from(empty_exec.schema()).map_err(|_| ())?;
+ self.add_schema(&schema)?;
+ } else if plan_any.downcast_ref::<UnionExec>().is_some() {
+ // continue visiting
+ } else {
+ // unsupported node
+ return Err(());
+ }
+
+ Ok(true)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ provider::chunks_to_physical_nodes, test::TestChunk, util::df_physical_expr, QueryChunkMeta,
+ };
+ use arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
+ use data_types::ChunkId;
+ use datafusion::{
+ execution::context::TaskContext,
+ physical_plan::filter::FilterExec,
+ prelude::{col, lit, SessionConfig, SessionContext},
+ };
+ use predicate::Predicate;
+
+ use super::*;
+
+ #[test]
+ fn test_roundtrip_empty() {
+ let schema = chunk(1).schema().clone();
+ assert_roundtrip(schema, vec![]);
+ }
+
+ #[test]
+ fn test_roundtrip_single_record_batch() {
+ let chunk1 = chunk(1);
+ assert_roundtrip(chunk1.schema().clone(), vec![Arc::new(chunk1)]);
+ }
+
+ #[test]
+ fn test_roundtrip_single_parquet() {
+ let chunk1 = chunk(1).with_dummy_parquet_file();
+ assert_roundtrip(chunk1.schema().clone(), vec![Arc::new(chunk1)]);
+ }
+
+ #[test]
+ fn test_roundtrip_many_chunks() {
+ let chunk1 = chunk(1).with_dummy_parquet_file();
+ let chunk2 = chunk(2).with_dummy_parquet_file();
+ let chunk3 = chunk(3).with_dummy_parquet_file();
+ let chunk4 = chunk(4);
+ let chunk5 = chunk(5);
+ assert_roundtrip(
+ chunk1.schema().clone(),
+ vec![
+ Arc::new(chunk1),
+ Arc::new(chunk2),
+ Arc::new(chunk3),
+ Arc::new(chunk4),
+ Arc::new(chunk5),
+ ],
+ );
+ }
+
+ #[test]
+ fn test_different_schemas() {
+ let some_chunk = chunk(1);
+ let iox_schema = some_chunk.schema();
+ let schema1 = iox_schema.as_arrow();
+ let schema2 = iox_schema.select_by_indices(&[]).as_arrow();
+ let plan = UnionExec::new(vec![
+ Arc::new(EmptyExec::new(false, schema1)),
+ Arc::new(EmptyExec::new(false, schema2)),
+ ]);
+ assert!(extract_chunks(&plan).is_none());
+ }
+
+ #[test]
+ fn test_empty_exec_with_rows() {
+ let schema = chunk(1).schema().as_arrow();
+ let plan = EmptyExec::new(true, schema);
+ assert!(extract_chunks(&plan).is_none());
+ }
+
+ #[test]
+ fn test_empty_exec_no_iox_schema() {
+ let schema = Arc::new(ArrowSchema::new(vec![Field::new(
+ "x",
+ DataType::Float64,
+ true,
+ )]));
+ let plan = EmptyExec::new(false, schema);
+ assert!(extract_chunks(&plan).is_none());
+ }
+
+ #[test]
+ fn test_stop_at_other_node_types() {
+ let chunk1 = chunk(1);
+ let schema = chunk1.schema().clone();
+ let plan = chunks_to_physical_nodes(
+ &schema,
+ None,
+ vec![Arc::new(chunk1)],
+ Predicate::default(),
+ task_ctx(),
+ );
+ let plan = FilterExec::try_new(
+ df_physical_expr(plan.as_ref(), col("tag1").eq(lit("foo"))).unwrap(),
+ plan,
+ )
+ .unwrap();
+ assert!(extract_chunks(&plan).is_none());
+ }
+
+ #[track_caller]
+ fn assert_roundtrip(schema: Schema, chunks: Vec<Arc<dyn QueryChunk>>) {
+ let plan = chunks_to_physical_nodes(
+ &schema,
+ None,
+ chunks.clone(),
+ Predicate::default(),
+ task_ctx(),
+ );
+ let (schema2, chunks2) = extract_chunks(plan.as_ref()).expect("data found");
+ assert_eq!(schema, schema2);
+ assert_eq!(chunk_ids(&chunks), chunk_ids(&chunks2));
+ }
+
+ fn task_ctx() -> Arc<TaskContext> {
+ let session_ctx =
+ SessionContext::with_config(SessionConfig::default().with_target_partitions(2));
+ Arc::new(TaskContext::from(&session_ctx))
+ }
+
+ fn chunk_ids(chunks: &[Arc<dyn QueryChunk>]) -> Vec<ChunkId> {
+ let mut ids = chunks.iter().map(|c| c.id()).collect::<Vec<_>>();
+ ids.sort();
+ ids
+ }
+
+ fn chunk(id: u128) -> TestChunk {
+ TestChunk::new("table")
+ .with_id(id)
+ .with_tag_column("tag1")
+ .with_tag_column("tag2")
+ .with_i64_field_column("field")
+ .with_time_column()
+ }
+}
diff --git a/iox_query/src/physical_optimizer/mod.rs b/iox_query/src/physical_optimizer/mod.rs
index eef746e173..346c5a0454 100644
--- a/iox_query/src/physical_optimizer/mod.rs
+++ b/iox_query/src/physical_optimizer/mod.rs
@@ -4,6 +4,7 @@ use datafusion::{execution::context::SessionState, physical_optimizer::PhysicalO
use self::union::one_union::OneUnion;
+mod chunk_extraction;
mod union;
#[cfg(test)]
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index c1621094d0..b4063f171e 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -42,9 +42,8 @@ mod physical;
mod record_batch_exec;
use self::overlap::group_potential_duplicates;
pub use deduplicate::{DeduplicateExec, RecordBatchDeduplicator};
-pub(crate) use physical::chunks_to_physical_nodes;
+pub(crate) use physical::{chunks_to_physical_nodes, PartitionedFileExt};
-#[cfg(test)]
pub(crate) use record_batch_exec::RecordBatchesExec;
#[derive(Debug, Snafu)]
diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs
index 0badef5b5a..62cd4a83ff 100644
--- a/iox_query/src/provider/physical.rs
+++ b/iox_query/src/provider/physical.rs
@@ -23,7 +23,7 @@ use std::{
};
/// Extension for [`PartitionedFile`] to hold the original [`QueryChunk`].
-struct PartitionedFileExt(Arc<dyn QueryChunk>);
+pub struct PartitionedFileExt(pub Arc<dyn QueryChunk>);
/// Holds a list of chunks that all have the same "URL" and
/// will be scanned using the same ParquetExec.
diff --git a/iox_query/src/provider/record_batch_exec.rs b/iox_query/src/provider/record_batch_exec.rs
index 95cfbcc03f..5f39a15851 100644
--- a/iox_query/src/provider/record_batch_exec.rs
+++ b/iox_query/src/provider/record_batch_exec.rs
@@ -72,7 +72,6 @@ impl RecordBatchesExec {
}
/// Chunks that make up this node.
- #[allow(dead_code)]
pub fn chunks(&self) -> impl Iterator<Item = &Arc<dyn QueryChunk>> {
self.chunks.iter().map(|(chunk, _batches)| chunk)
}
|
d07658282c17dda0b028b7b084f3b46e1909d3f3
|
Luke Bond
|
2022-11-30 13:14:39
|
add router config parameter for retention (#6278)
|
* chore: remove unused/moved ns_autocreation dml handler
* feat(router): expose new ns retention as config
* fix: forgot to set default value for router retention arg
* chore: make new namespace retention param an option
| null |
feat: add router config parameter for retention (#6278)
* chore: remove unused/moved ns_autocreation dml handler
* feat(router): expose new ns retention as config
* fix: forgot to set default value for router retention arg
* chore: make new namespace retention param an option
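(Illustrative sketch, not part of the commit: how the optional `--new-namespace-retention-hours` value maps onto the nanosecond retention stored in the catalog, mirroring the conversion in the diff below — `None` keeps retention infinite, and zero stays zero rather than becoming infinite.)

```rust
// Sketch of the hours -> nanoseconds mapping used when auto-creating namespaces.
fn retention_ns(new_namespace_retention_hours: Option<u64>) -> Option<i64> {
    new_namespace_retention_hours.map(|hours| hours as i64 * 60 * 60 * 1_000_000_000)
}

fn main() {
    assert_eq!(retention_ns(None), None);          // unset => infinite retention
    assert_eq!(retention_ns(Some(1)), Some(3_600_000_000_000));
    assert_eq!(retention_ns(Some(0)), Some(0));    // zero is not infinite
}
```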
|
diff --git a/clap_blocks/src/lib.rs b/clap_blocks/src/lib.rs
index 3cf8f93ad4..61b8eb3d1e 100644
--- a/clap_blocks/src/lib.rs
+++ b/clap_blocks/src/lib.rs
@@ -17,6 +17,7 @@ pub mod compactor;
pub mod ingester;
pub mod object_store;
pub mod querier;
+pub mod router;
pub mod run_config;
pub mod socket_addr;
pub mod write_buffer;
diff --git a/clap_blocks/src/router.rs b/clap_blocks/src/router.rs
new file mode 100644
index 0000000000..ac7a856bce
--- /dev/null
+++ b/clap_blocks/src/router.rs
@@ -0,0 +1,42 @@
+//! CLI config for router
+
+/// CLI config for router
+#[derive(Debug, Clone, clap::Parser)]
+#[allow(missing_copy_implementations)]
+pub struct RouterConfig {
+ /// Query pool name to dispatch writes to.
+ #[clap(
+ long = "query-pool",
+ env = "INFLUXDB_IOX_QUERY_POOL_NAME",
+ default_value = "iox-shared",
+ action
+ )]
+ pub query_pool_name: String,
+
+ /// The maximum number of simultaneous requests the HTTP server is
+ /// configured to accept.
+ ///
+ /// This number of requests, multiplied by the maximum request body size the
+ /// HTTP server is configured with gives the rough amount of memory a HTTP
+ /// server will use to buffer request bodies in memory.
+ ///
+ /// A default maximum of 200 requests, multiplied by the default 10MiB
+ /// maximum for HTTP request bodies == ~2GiB.
+ #[clap(
+ long = "max-http-requests",
+ env = "INFLUXDB_IOX_MAX_HTTP_REQUESTS",
+ default_value = "200",
+ action
+ )]
+ pub http_request_limit: usize,
+
+ /// Retention period to use when auto-creating namespaces.
+ /// For infinite retention, leave this unset and it will default to `None`.
+ /// Setting it to zero will not make it infinite.
+ #[clap(
+ long = "new-namespace-retention-hours",
+ env = "INFLUXDB_IOX_NEW_NAMESPACE_RETENTION_HOURS",
+ action
+ )]
+ pub new_namespace_retention_hours: Option<u64>,
+}
diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs
index 1ec6c5d7ed..00adedb793 100644
--- a/influxdb_iox/src/commands/run/all_in_one.rs
+++ b/influxdb_iox/src/commands/run/all_in_one.rs
@@ -9,6 +9,7 @@ use clap_blocks::{
ingester::IngesterConfig,
object_store::{make_object_store, ObjectStoreConfig},
querier::{IngesterAddresses, QuerierConfig},
+ router::RouterConfig,
run_config::RunConfig,
socket_addr::SocketAddr,
write_buffer::WriteBufferConfig,
@@ -412,6 +413,12 @@ impl Config {
persist_partition_rows_max: 500_000,
};
+ let router_config = RouterConfig {
+ query_pool_name: QUERY_POOL_NAME.to_string(),
+ http_request_limit: 1_000,
+ new_namespace_retention_hours: None, // infinite retention
+ };
+
// create a CompactorConfig for the all in one server based on
// settings from other configs. Can't use `#clap(flatten)` as the
// parameters are redundant with ingester's
@@ -455,6 +462,7 @@ impl Config {
catalog_dsn,
write_buffer_config,
ingester_config,
+ router_config,
compactor_config,
querier_config,
}
@@ -472,6 +480,7 @@ struct SpecializedConfig {
catalog_dsn: CatalogDsnConfig,
write_buffer_config: WriteBufferConfig,
ingester_config: IngesterConfig,
+ router_config: RouterConfig,
compactor_config: CompactorConfig,
querier_config: QuerierConfig,
}
@@ -485,6 +494,7 @@ pub async fn command(config: Config) -> Result<()> {
catalog_dsn,
write_buffer_config,
ingester_config,
+ router_config,
compactor_config,
querier_config,
} = config.specialize();
@@ -539,8 +549,7 @@ pub async fn command(config: Config) -> Result<()> {
Arc::clone(&catalog),
Arc::clone(&object_store),
&write_buffer_config,
- QUERY_POOL_NAME,
- 1_000, // max 1,000 concurrent HTTP requests
+ &router_config,
)
.await?;
diff --git a/influxdb_iox/src/commands/run/router.rs b/influxdb_iox/src/commands/run/router.rs
index 4d59e13715..bf87a2e8a2 100644
--- a/influxdb_iox/src/commands/run/router.rs
+++ b/influxdb_iox/src/commands/run/router.rs
@@ -4,6 +4,7 @@ use crate::process_info::setup_metric_registry;
use super::main;
use clap_blocks::object_store::make_object_store;
+use clap_blocks::router::RouterConfig;
use clap_blocks::{
catalog_dsn::CatalogDsnConfig, run_config::RunConfig, write_buffer::WriteBufferConfig,
};
@@ -64,31 +65,8 @@ pub struct Config {
#[clap(flatten)]
pub(crate) write_buffer_config: WriteBufferConfig,
- /// Query pool name to dispatch writes to.
- #[clap(
- long = "query-pool",
- env = "INFLUXDB_IOX_QUERY_POOL_NAME",
- default_value = "iox-shared",
- action
- )]
- pub(crate) query_pool_name: String,
-
- /// The maximum number of simultaneous requests the HTTP server is
- /// configured to accept.
- ///
- /// This number of requests, multiplied by the maximum request body size the
- /// HTTP server is configured with gives the rough amount of memory a HTTP
- /// server will use to buffer request bodies in memory.
- ///
- /// A default maximum of 200 requests, multiplied by the default 10MiB
- /// maximum for HTTP request bodies == ~2GiB.
- #[clap(
- long = "max-http-requests",
- env = "INFLUXDB_IOX_MAX_HTTP_REQUESTS",
- default_value = "200",
- action
- )]
- pub(crate) http_request_limit: usize,
+ #[clap(flatten)]
+ pub(crate) router_config: RouterConfig,
}
pub async fn command(config: Config) -> Result<()> {
@@ -116,8 +94,7 @@ pub async fn command(config: Config) -> Result<()> {
catalog,
object_store,
&config.write_buffer_config,
- &config.query_pool_name,
- config.http_request_limit,
+ &config.router_config,
)
.await?;
diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs
index cffaccf2b9..2e5c031bf0 100644
--- a/ioxd_router/src/lib.rs
+++ b/ioxd_router/src/lib.rs
@@ -1,5 +1,5 @@
use async_trait::async_trait;
-use clap_blocks::write_buffer::WriteBufferConfig;
+use clap_blocks::{router::RouterConfig, write_buffer::WriteBufferConfig};
use data_types::{NamespaceName, PartitionTemplate, TemplatePart};
use hashbrown::HashMap;
use hyper::{Body, Request, Response};
@@ -171,8 +171,7 @@ pub async fn create_router_server_type(
catalog: Arc<dyn Catalog>,
object_store: Arc<DynObjectStore>,
write_buffer_config: &WriteBufferConfig,
- query_pool_name: &str,
- request_limit: usize,
+ router_config: &RouterConfig,
) -> Result<Arc<dyn ServerType>> {
// Initialise the sharded write buffer and instrument it with DML handler
// metrics.
@@ -246,7 +245,7 @@ pub async fn create_router_server_type(
.unwrap_or_else(|| panic!("no topic named {} in catalog", write_buffer_config.topic()));
let query_id = txn
.query_pools()
- .create_or_get(query_pool_name)
+ .create_or_get(&router_config.query_pool_name)
.await
.map(|v| v.id)
.unwrap_or_else(|e| {
@@ -264,7 +263,9 @@ pub async fn create_router_server_type(
Arc::clone(&catalog),
topic_id,
query_id,
- None,
+ router_config
+ .new_namespace_retention_hours
+ .map(|hours| hours as i64 * 60 * 60 * 1_000_000_000),
);
//
////////////////////////////////////////////////////////////////////////////
@@ -298,7 +299,7 @@ pub async fn create_router_server_type(
// Initialise the API delegates
let http = HttpDelegate::new(
common_state.run_config().max_http_request_size,
- request_limit,
+ router_config.http_request_limit,
namespace_resolver,
handler_stack,
&metrics,
diff --git a/router/src/dml_handlers/ns_autocreation.rs b/router/src/dml_handlers/ns_autocreation.rs
deleted file mode 100644
index eea205001a..0000000000
--- a/router/src/dml_handlers/ns_autocreation.rs
+++ /dev/null
@@ -1,231 +0,0 @@
-use std::{fmt::Debug, marker::PhantomData, sync::Arc};
-
-use async_trait::async_trait;
-use data_types::{NamespaceName, DeletePredicate, QueryPoolId, TopicId};
-use iox_catalog::interface::Catalog;
-use observability_deps::tracing::*;
-use thiserror::Error;
-use trace::ctx::SpanContext;
-
-use super::DmlHandler;
-use crate::namespace_cache::NamespaceCache;
-
-/// An error auto-creating the request namespace.
-#[derive(Debug, Error)]
-pub enum NamespaceCreationError {
- /// An error returned from a namespace creation request.
- #[error("failed to create namespace: {0}")]
- Create(iox_catalog::interface::Error),
-}
-
-/// A layer to populate the [`Catalog`] with all the namespaces the router
-/// observes.
-///
-/// Uses a [`NamespaceCache`] to limit issuing create requests to namespaces the
-/// router has not yet observed a schema for.
-#[derive(Debug)]
-pub struct NamespaceAutocreation<C, T> {
- catalog: Arc<dyn Catalog>,
- cache: C,
-
- topic_id: TopicId,
- query_id: QueryPoolId,
- _input: PhantomData<T>,
-}
-
-impl<C, T> NamespaceAutocreation<C, T> {
- /// Return a new [`NamespaceAutocreation`] layer that ensures a requested
- /// namespace exists in `catalog`.
- ///
- /// If the namespace does not exist, it is created with the specified
- /// `topic_id`, `query_id` and `retention` policy.
- ///
- /// Namespaces are looked up in `cache`, skipping the creation request to
- /// the catalog if there's a hit.
- pub fn new(
- catalog: Arc<dyn Catalog>,
- cache: C,
- topic_id: TopicId,
- query_id: QueryPoolId,
- retention: String,
- ) -> Self {
- Self {
- catalog,
- cache,
- topic_id,
- query_id,
- retention,
- _input: Default::default(),
- }
- }
-}
-
-#[async_trait]
-impl<C, T> DmlHandler for NamespaceAutocreation<C, T>
-where
- C: NamespaceCache,
- T: Debug + Send + Sync,
-{
- type WriteError = NamespaceCreationError;
- type DeleteError = NamespaceCreationError;
-
- // This handler accepts any write input type, returning it to the caller
- // unmodified.
- type WriteInput = T;
- type WriteOutput = T;
-
- /// Write `batches` to `namespace`.
- async fn write(
- &self,
- namespace: &'_ NamespaceName<'static>,
- batches: Self::WriteInput,
- _span_ctx: Option<SpanContext>,
- ) -> Result<Self::WriteOutput, Self::WriteError> {
- // If the namespace does not exist in the schema cache (populated by the
- // schema validator) request an (idempotent) creation.
- if self.cache.get_schema(namespace).is_none() {
- trace!(%namespace, "namespace auto-create cache miss");
-
- let mut repos = self.catalog.repositories().await;
-
- match repos
- .namespaces()
- .create(
- namespace.as_str(),
- &self.retention,
- self.topic_id,
- self.query_id,
- )
- .await
- {
- Ok(_) => {
- debug!(%namespace, "created namespace");
- }
- Err(iox_catalog::interface::Error::NameExists { .. }) => {
- // Either the cache has not yet converged to include this
- // namespace, or another thread raced populating the catalog
- // and beat this thread to it.
- debug!(%namespace, "spurious namespace create failed");
- }
- Err(e) => {
- error!(error=%e, %namespace, "failed to auto-create namespace");
- return Err(NamespaceCreationError::Create(e));
- }
- }
- }
-
- Ok(batches)
- }
-
- /// Delete the data specified in `delete`.
- async fn delete(
- &self,
- _namespace: &NamespaceName<'static>,
- _table_name: &str,
- _predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use data_types::{Namespace, NamespaceId, NamespaceSchema};
- use iox_catalog::mem::MemCatalog;
-
- use super::*;
- use crate::namespace_cache::MemoryNamespaceCache;
-
- #[tokio::test]
- async fn test_cache_hit() {
- let ns = NamespaceName::try_from("bananas").unwrap();
-
- // Prep the cache before the test to cause a hit
- let cache = Arc::new(MemoryNamespaceCache::default());
- cache.put_schema(
- ns.clone(),
- NamespaceSchema {
- id: NamespaceId::new(1),
- topic_id: TopicId::new(2),
- query_pool_id: QueryPoolId::new(3),
- tables: Default::default(),
- max_columns_per_table: 4,
- },
- );
-
- let metrics = Arc::new(metric::Registry::new());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
-
- let creator = NamespaceAutocreation::new(
- Arc::clone(&catalog),
- cache,
- TopicId::new(42),
- QueryPoolId::new(42),
- );
-
- // Drive the code under test
- creator
- .write(&ns, (), None)
- .await
- .expect("handler should succeed");
-
- // The cache hit should mean the catalog SHOULD NOT see a create request
- // for the namespace.
- let mut repos = catalog.repositories().await;
- assert!(
- repos
- .namespaces()
- .get_by_name(ns.as_str())
- .await
- .expect("lookup should not error")
- .is_none(),
- "expected no request to the catalog"
- );
- }
-
- #[tokio::test]
- async fn test_cache_miss() {
- let ns = NamespaceName::try_from("bananas").unwrap();
-
- let cache = Arc::new(MemoryNamespaceCache::default());
- let metrics = Arc::new(metric::Registry::new());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
-
- let creator = NamespaceAutocreation::new(
- Arc::clone(&catalog),
- cache,
- TopicId::new(42),
- QueryPoolId::new(42),
- );
-
- creator
- .write(&ns, (), None)
- .await
- .expect("handler should succeed");
-
- // The cache miss should mean the catalog MUST see a create request for
- // the namespace.
- let mut repos = catalog.repositories().await;
- let got = repos
- .namespaces()
- .get_by_name(ns.as_str())
- .await
- .expect("lookup should not error")
- .expect("creation request should be sent to catalog");
-
- assert_eq!(
- got,
- Namespace {
- id: NamespaceId::new(1),
- name: ns.to_string(),
- topic_id: TopicId::new(42),
- query_pool_id: QueryPoolId::new(42),
- max_tables: iox_catalog::DEFAULT_MAX_TABLES,
- max_columns_per_table: iox_catalog::DEFAULT_MAX_COLUMNS_PER_TABLE,
- }
- );
- }
-}
|
f26b54beecda1321d97c16616667c75ea2042dba
|
Dom Dwyer
|
2023-01-24 19:15:44
|
set sensible RPC timeouts
|
Copies these over from the client_util package.
| null |
refactor(router): set sensible RPC timeouts
Copies these over from the client_util package.
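(Illustrative sketch, not part of the commit: applying a connect timeout and a per-request timeout to a tonic `Endpoint` before it is handed to the lazy connector, as the diff below does.)

```rust
use std::time::Duration;
use tonic::transport::Endpoint;

const CONNECT_TIMEOUT: Duration = Duration::from_secs(1);
const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);

// Configure both timeouts up front; every request made over the resulting
// channel inherits them.
fn configure(addr: Endpoint) -> Endpoint {
    addr.connect_timeout(CONNECT_TIMEOUT)
        .timeout(REQUEST_TIMEOUT)
}

fn main() {
    let endpoint = Endpoint::from_static("http://127.0.0.1:8082");
    let _endpoint = configure(endpoint);
}
```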
|
diff --git a/router/src/dml_handlers/rpc_write/lazy_connector.rs b/router/src/dml_handlers/rpc_write/lazy_connector.rs
index 6db80017e9..92e4b88c48 100644
--- a/router/src/dml_handlers/rpc_write/lazy_connector.rs
+++ b/router/src/dml_handlers/rpc_write/lazy_connector.rs
@@ -14,6 +14,8 @@ use tonic::transport::{Channel, Endpoint};
use super::{client::WriteClient, RpcWriteError};
const RETRY_INTERVAL: Duration = Duration::from_secs(1);
+const CONNECT_TIMEOUT: Duration = Duration::from_secs(1);
+const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
/// Lazy [`Channel`] connector.
///
@@ -33,6 +35,9 @@ pub struct LazyConnector {
impl LazyConnector {
/// Lazily connect to `addr`.
pub fn new(addr: Endpoint) -> Self {
+ let addr = addr
+ .connect_timeout(CONNECT_TIMEOUT)
+ .timeout(REQUEST_TIMEOUT);
let connection = Default::default();
Self {
addr: addr.clone(),
|
429e1c53192f072345cd39c68d8e66dd28bf21dd
|
Dom Dwyer
|
2023-02-20 21:19:16
|
remove unnecessary dyn / boxing
|
Remove an extraneous heap allocation / dynamic dispatch for each query -
the result type never changes, so there's no benefit to boxing the
returned stream.
| null |
perf(ingester): remove unnecessary dyn / boxing
Remove an extraneous heap allocation / dynamic dispatch for each query -
the result type never changes, so there's no benefit to boxing the
returned stream.
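(Illustrative sketch, not part of the commit: the difference the diff below exploits — when the concrete stream type never varies, returning `impl Stream` avoids the per-call heap allocation and dynamic dispatch of a `BoxStream`.)

```rust
use futures::stream::BoxStream;
use futures::{stream, Stream, StreamExt};

// Boxed: allocates and type-erases on every call.
fn numbers_boxed() -> BoxStream<'static, u32> {
    stream::iter(0..3).boxed()
}

// Unboxed: the concrete type is fixed, so no allocation is needed.
fn numbers_impl() -> impl Stream<Item = u32> {
    stream::iter(0..3)
}

#[tokio::main]
async fn main() {
    let boxed: Vec<_> = numbers_boxed().collect().await;
    let unboxed: Vec<_> = numbers_impl().collect().await;
    assert_eq!(boxed, unboxed);
}
```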
|
diff --git a/ingester2/src/server/grpc/query.rs b/ingester2/src/server/grpc/query.rs
index 7d393aa826..0d6b2acc39 100644
--- a/ingester2/src/server/grpc/query.rs
+++ b/ingester2/src/server/grpc/query.rs
@@ -8,7 +8,7 @@ use arrow_flight::{
};
use data_types::{NamespaceId, PartitionId, TableId};
use flatbuffers::FlatBufferBuilder;
-use futures::{stream::BoxStream, Stream, StreamExt, TryStreamExt};
+use futures::{Stream, StreamExt, TryStreamExt};
use generated_types::influxdata::iox::ingester::v1::{self as proto, PartitionStatus};
use metric::U64Counter;
use observability_deps::tracing::*;
@@ -260,7 +260,7 @@ fn encode_partition(
// [`PartitionResponse`]: crate::query::partition_response::PartitionResponse
completed_persistence_count: u64,
ingester_id: IngesterId,
-) -> std::result::Result<FlightData, FlightError> {
+) -> Result<FlightData, FlightError> {
let mut bytes = bytes::BytesMut::new();
let app_metadata = proto::IngesterQueryResponseMetadata {
partition_id: partition_id.get(),
@@ -299,35 +299,32 @@ fn build_none_flight_msg() -> Vec<u8> {
fn encode_response(
response: QueryResponse,
ingester_id: IngesterId,
-) -> BoxStream<'static, std::result::Result<FlightData, FlightError>> {
- response
- .into_partition_stream()
- .flat_map(move |partition| {
- let partition_id = partition.id();
- let completed_persistence_count = partition.completed_persistence_count();
- let head = futures::stream::once(async move {
- encode_partition(
- partition_id,
- PartitionStatus {
- parquet_max_sequence_number: None,
- },
- completed_persistence_count,
- ingester_id,
- )
- });
-
- match partition.into_record_batch_stream() {
- Some(stream) => {
- let stream = stream.map_err(|e| FlightError::ExternalError(Box::new(e)));
-
- let tail = FlightDataEncoderBuilder::new().build(stream);
-
- head.chain(tail).boxed()
- }
- None => head.boxed(),
+) -> impl Stream<Item = Result<FlightData, FlightError>> {
+ response.into_partition_stream().flat_map(move |partition| {
+ let partition_id = partition.id();
+ let completed_persistence_count = partition.completed_persistence_count();
+ let head = futures::stream::once(async move {
+ encode_partition(
+ partition_id,
+ PartitionStatus {
+ parquet_max_sequence_number: None,
+ },
+ completed_persistence_count,
+ ingester_id,
+ )
+ });
+
+ match partition.into_record_batch_stream() {
+ Some(stream) => {
+ let stream = stream.map_err(|e| FlightError::ExternalError(Box::new(e)));
+
+ let tail = FlightDataEncoderBuilder::new().build(stream);
+
+ head.chain(tail).boxed()
}
- })
- .boxed()
+ None => head.boxed(),
+ }
+ })
}
#[cfg(test)]
|
4414c6940b2a80d5a07e744b305cbba7d2ef132d
|
Dom Dwyer
|
2023-08-29 16:24:10
|
move MST et al into module
|
Adds a "mst" (merkle search tree) submodule in anti_entropy, and moves
all the MST code into it.
This makes space for a gossip-based sync primitive to live here too.
| null |
refactor: move MST et al into module
Adds a "mst" (merkle search tree) submodule in anti_entropy, and moves
all the MST code into it.
This makes space for a gossip-based sync primitive to live here too.
|
diff --git a/router/benches/namespace_schema_cache.rs b/router/benches/namespace_schema_cache.rs
index 80a014a57c..dc8fb215e9 100644
--- a/router/benches/namespace_schema_cache.rs
+++ b/router/benches/namespace_schema_cache.rs
@@ -11,7 +11,7 @@ use data_types::{
use iox_catalog::{interface::Catalog, mem::MemCatalog};
use once_cell::sync::Lazy;
use router::{
- gossip::anti_entropy::{actor::AntiEntropyActor, merkle::MerkleTree},
+ gossip::anti_entropy::mst::{actor::AntiEntropyActor, merkle::MerkleTree},
namespace_cache::{MemoryNamespaceCache, NamespaceCache, ReadThroughCache, ShardedCache},
};
diff --git a/router/src/gossip/anti_entropy/mod.rs b/router/src/gossip/anti_entropy/mod.rs
index 59586bc632..8dddfadc2c 100644
--- a/router/src/gossip/anti_entropy/mod.rs
+++ b/router/src/gossip/anti_entropy/mod.rs
@@ -1,165 +1,3 @@
//! Anti-entropy primitives providing eventual consistency over gossip.
-pub mod actor;
-pub mod handle;
-pub mod merkle;
-
-#[cfg(test)]
-mod tests {
- use std::{collections::BTreeMap, sync::Arc};
-
- use crate::{
- gossip::anti_entropy::{actor::AntiEntropyActor, merkle::MerkleTree},
- namespace_cache::{MemoryNamespaceCache, NamespaceCache},
- };
-
- use data_types::{
- ColumnId, ColumnSchema, ColumnType, ColumnsByName, NamespaceId, NamespaceName,
- NamespaceSchema, TableId, TableSchema,
- };
- use proptest::prelude::*;
-
- /// A set of table and column names from which arbitrary names are selected
- /// in prop tests, instead of using random values that have a low
- /// probability of overlap.
- const TEST_TABLE_NAME_SET: &[&str] = &[
- "bananas", "quiero", "un", "platano", "donkey", "goose", "egg", "mr_toro",
- ];
-
- prop_compose! {
- /// Generate a series of ColumnSchema assigned randomised IDs with a
- /// stable mapping of `id -> data type`.
- ///
- /// This generates at most 255 unique columns.
- pub fn arbitrary_column_schema_stable()(id in 0_i16..255) -> ColumnSchema {
- // Provide a stable mapping of ID to data type to avoid column type
- // conflicts by reducing the ID to the data type discriminant range
- // and using that to assign the data type.
- let col_type = ColumnType::try_from((id % 7) + 1).expect("valid discriminator range");
-
- ColumnSchema { id: ColumnId::new(id as _), column_type: col_type }
- }
- }
-
- prop_compose! {
- /// Generate an arbitrary TableSchema with up to 255 columns that
- /// contain stable `column name -> data type` and `column name -> column
- /// id` mappings.
- pub fn arbitrary_table_schema()(
- id in any::<i64>(),
- columns in proptest::collection::hash_set(
- arbitrary_column_schema_stable(),
- (0, 255) // Set size range
- ),
- ) -> TableSchema {
- // Map the column schemas into `name -> schema`, generating a
- // column name derived from the column ID to ensure a consistent
- // mapping of name -> id, and in turn, name -> data type.
- let columns = columns.into_iter()
- .map(|v| (format!("col-{}", v.id.get()), v))
- .collect::<BTreeMap<String, ColumnSchema>>();
-
- let columns = ColumnsByName::from(columns);
- TableSchema {
- id: TableId::new(id),
- partition_template: Default::default(),
- columns,
- }
- }
- }
-
- prop_compose! {
- /// Generate an arbitrary NamespaceSchema that contains tables from
- /// [`TEST_TABLE_NAME_SET`], containing up to 255 columns with stable
- /// `name -> (id, data type)` mappings.
- ///
- /// Namespace IDs are allocated from the specified strategy.
- pub fn arbitrary_namespace_schema(namespace_ids: impl Strategy<Value = i64>)(
- namespace_id in namespace_ids,
- tables in proptest::collection::btree_map(
- proptest::sample::select(TEST_TABLE_NAME_SET),
- arbitrary_table_schema(),
- (0, 10) // Set size range
- ),
- max_columns_per_table in any::<usize>(),
- max_tables in any::<usize>(),
- retention_period_ns in any::<Option<i64>>(),
- ) -> NamespaceSchema {
- let tables = tables.into_iter().map(|(k, v)| (k.to_string(), v)).collect();
- NamespaceSchema {
- id: NamespaceId::new(namespace_id),
- tables,
- max_columns_per_table,
- max_tables,
- retention_period_ns,
- partition_template: Default::default(),
- }
- }
- }
-
- fn name_for_schema(schema: &NamespaceSchema) -> NamespaceName<'static> {
- NamespaceName::try_from(format!("ns-{}", schema.id)).unwrap()
- }
-
- proptest! {
- /// Assert that two distinct namespace cache instances return identical
- /// content hashes after applying a given set of cache updates.
- #[test]
- fn prop_content_hash_diverge_converge(
- // A variable number of cache entry updates for 2 namespace IDs
- updates in prop::collection::vec(arbitrary_namespace_schema(
- prop_oneof![Just(1), Just(2)]), // IDs assigned
- 0..10 // Number of updates
- ),
- // An arbitrary namespace with an ID that lies outside of `updates`.
- last_update in arbitrary_namespace_schema(42_i64..100),
- ) {
- tokio::runtime::Runtime::new().unwrap().block_on(async move {
- let cache_a = Arc::new(MemoryNamespaceCache::default());
- let cache_b = Arc::new(MemoryNamespaceCache::default());
-
- let (actor_a, handle_a) = AntiEntropyActor::new(Arc::clone(&cache_a));
- let (actor_b, handle_b) = AntiEntropyActor::new(Arc::clone(&cache_b));
-
- // Start the MST actors
- tokio::spawn(actor_a.run());
- tokio::spawn(actor_b.run());
-
- let ns_a = MerkleTree::new(cache_a, handle_a.clone());
- let ns_b = MerkleTree::new(cache_b, handle_b.clone());
-
- // Invariant: two empty namespace caches have the same content hash.
- assert_eq!(handle_a.content_hash().await, handle_b.content_hash().await);
-
- for update in updates {
- // Generate a unique, deterministic name for this namespace.
- let name = name_for_schema(&update);
-
- // Apply the update (which may be a no-op) to both.
- ns_a.put_schema(name.clone(), update.clone());
- ns_b.put_schema(name, update);
-
- // Invariant: after applying the same update, the content hashes
- // MUST match (even if this update was a no-op / not an update)
- assert_eq!(handle_a.content_hash().await, handle_b.content_hash().await);
- }
-
- // At this point all updates have been applied to both caches.
- //
- // Add a new cache entry that doesn't yet exist, and assert this
- // causes the caches to diverge, and then once again reconverge.
- let name = name_for_schema(&last_update);
- ns_a.put_schema(name.clone(), last_update.clone());
-
- // Invariant: last_update definitely added new cache content,
- // therefore the cache content hashes MUST diverge.
- assert_ne!(handle_a.content_hash().await, handle_b.content_hash().await);
-
- // Invariant: applying the update to the other cache converges their
- // content hashes.
- ns_b.put_schema(name, last_update);
- assert_eq!(handle_a.content_hash().await, handle_b.content_hash().await);
- });
- }
- }
-}
+pub mod mst;
diff --git a/router/src/gossip/anti_entropy/actor.rs b/router/src/gossip/anti_entropy/mst/actor.rs
similarity index 100%
rename from router/src/gossip/anti_entropy/actor.rs
rename to router/src/gossip/anti_entropy/mst/actor.rs
diff --git a/router/src/gossip/anti_entropy/handle.rs b/router/src/gossip/anti_entropy/mst/handle.rs
similarity index 100%
rename from router/src/gossip/anti_entropy/handle.rs
rename to router/src/gossip/anti_entropy/mst/handle.rs
diff --git a/router/src/gossip/anti_entropy/merkle.rs b/router/src/gossip/anti_entropy/mst/merkle.rs
similarity index 100%
rename from router/src/gossip/anti_entropy/merkle.rs
rename to router/src/gossip/anti_entropy/mst/merkle.rs
diff --git a/router/src/gossip/anti_entropy/mst/mod.rs b/router/src/gossip/anti_entropy/mst/mod.rs
new file mode 100644
index 0000000000..131ebc613d
--- /dev/null
+++ b/router/src/gossip/anti_entropy/mst/mod.rs
@@ -0,0 +1,167 @@
+//! Decoupled, concurrency safe management of a [`MerkleSearchTree`].
+//!
+//! [`MerkleSearchTree`]: merkle_search_tree::MerkleSearchTree
+
+pub mod actor;
+pub mod handle;
+pub mod merkle;
+
+#[cfg(test)]
+mod tests {
+ use std::{collections::BTreeMap, sync::Arc};
+
+ use crate::{
+ gossip::anti_entropy::mst::{actor::AntiEntropyActor, merkle::MerkleTree},
+ namespace_cache::{MemoryNamespaceCache, NamespaceCache},
+ };
+
+ use data_types::{
+ ColumnId, ColumnSchema, ColumnType, ColumnsByName, NamespaceId, NamespaceName,
+ NamespaceSchema, TableId, TableSchema,
+ };
+ use proptest::prelude::*;
+
+ /// A set of table and column names from which arbitrary names are selected
+ /// in prop tests, instead of using random values that have a low
+ /// probability of overlap.
+ const TEST_TABLE_NAME_SET: &[&str] = &[
+ "bananas", "quiero", "un", "platano", "donkey", "goose", "egg", "mr_toro",
+ ];
+
+ prop_compose! {
+ /// Generate a series of ColumnSchema assigned randomised IDs with a
+ /// stable mapping of `id -> data type`.
+ ///
+ /// This generates at most 255 unique columns.
+ pub fn arbitrary_column_schema_stable()(id in 0_i16..255) -> ColumnSchema {
+ // Provide a stable mapping of ID to data type to avoid column type
+ // conflicts by reducing the ID to the data type discriminant range
+ // and using that to assign the data type.
+ let col_type = ColumnType::try_from((id % 7) + 1).expect("valid discriminator range");
+
+ ColumnSchema { id: ColumnId::new(id as _), column_type: col_type }
+ }
+ }
+
+ prop_compose! {
+ /// Generate an arbitrary TableSchema with up to 255 columns that
+ /// contain stable `column name -> data type` and `column name -> column
+ /// id` mappings.
+ pub fn arbitrary_table_schema()(
+ id in any::<i64>(),
+ columns in proptest::collection::hash_set(
+ arbitrary_column_schema_stable(),
+ (0, 255) // Set size range
+ ),
+ ) -> TableSchema {
+ // Map the column schemas into `name -> schema`, generating a
+ // column name derived from the column ID to ensure a consistent
+ // mapping of name -> id, and in turn, name -> data type.
+ let columns = columns.into_iter()
+ .map(|v| (format!("col-{}", v.id.get()), v))
+ .collect::<BTreeMap<String, ColumnSchema>>();
+
+ let columns = ColumnsByName::from(columns);
+ TableSchema {
+ id: TableId::new(id),
+ partition_template: Default::default(),
+ columns,
+ }
+ }
+ }
+
+ prop_compose! {
+ /// Generate an arbitrary NamespaceSchema that contains tables from
+ /// [`TEST_TABLE_NAME_SET`], containing up to 255 columns with stable
+ /// `name -> (id, data type)` mappings.
+ ///
+ /// Namespace IDs are allocated from the specified strategy.
+ pub fn arbitrary_namespace_schema(namespace_ids: impl Strategy<Value = i64>)(
+ namespace_id in namespace_ids,
+ tables in proptest::collection::btree_map(
+ proptest::sample::select(TEST_TABLE_NAME_SET),
+ arbitrary_table_schema(),
+ (0, 10) // Set size range
+ ),
+ max_columns_per_table in any::<usize>(),
+ max_tables in any::<usize>(),
+ retention_period_ns in any::<Option<i64>>(),
+ ) -> NamespaceSchema {
+ let tables = tables.into_iter().map(|(k, v)| (k.to_string(), v)).collect();
+ NamespaceSchema {
+ id: NamespaceId::new(namespace_id),
+ tables,
+ max_columns_per_table,
+ max_tables,
+ retention_period_ns,
+ partition_template: Default::default(),
+ }
+ }
+ }
+
+ fn name_for_schema(schema: &NamespaceSchema) -> NamespaceName<'static> {
+ NamespaceName::try_from(format!("ns-{}", schema.id)).unwrap()
+ }
+
+ proptest! {
+ /// Assert that two distinct namespace cache instances return identical
+ /// content hashes after applying a given set of cache updates.
+ #[test]
+ fn prop_content_hash_diverge_converge(
+ // A variable number of cache entry updates for 2 namespace IDs
+ updates in prop::collection::vec(arbitrary_namespace_schema(
+ prop_oneof![Just(1), Just(2)]), // IDs assigned
+ 0..10 // Number of updates
+ ),
+ // An arbitrary namespace with an ID that lies outside of `updates`.
+ last_update in arbitrary_namespace_schema(42_i64..100),
+ ) {
+ tokio::runtime::Runtime::new().unwrap().block_on(async move {
+ let cache_a = Arc::new(MemoryNamespaceCache::default());
+ let cache_b = Arc::new(MemoryNamespaceCache::default());
+
+ let (actor_a, handle_a) = AntiEntropyActor::new(Arc::clone(&cache_a));
+ let (actor_b, handle_b) = AntiEntropyActor::new(Arc::clone(&cache_b));
+
+ // Start the MST actors
+ tokio::spawn(actor_a.run());
+ tokio::spawn(actor_b.run());
+
+ let ns_a = MerkleTree::new(cache_a, handle_a.clone());
+ let ns_b = MerkleTree::new(cache_b, handle_b.clone());
+
+ // Invariant: two empty namespace caches have the same content hash.
+ assert_eq!(handle_a.content_hash().await, handle_b.content_hash().await);
+
+ for update in updates {
+ // Generate a unique, deterministic name for this namespace.
+ let name = name_for_schema(&update);
+
+ // Apply the update (which may be a no-op) to both.
+ ns_a.put_schema(name.clone(), update.clone());
+ ns_b.put_schema(name, update);
+
+ // Invariant: after applying the same update, the content hashes
+ // MUST match (even if this update was a no-op / not an update)
+ assert_eq!(handle_a.content_hash().await, handle_b.content_hash().await);
+ }
+
+ // At this point all updates have been applied to both caches.
+ //
+ // Add a new cache entry that doesn't yet exist, and assert this
+ // causes the caches to diverge, and then once again reconverge.
+ let name = name_for_schema(&last_update);
+ ns_a.put_schema(name.clone(), last_update.clone());
+
+ // Invariant: last_update definitely added new cache content,
+ // therefore the cache content hashes MUST diverge.
+ assert_ne!(handle_a.content_hash().await, handle_b.content_hash().await);
+
+ // Invariant: applying the update to the other cache converges their
+ // content hashes.
+ ns_b.put_schema(name, last_update);
+ assert_eq!(handle_a.content_hash().await, handle_b.content_hash().await);
+ });
+ }
+ }
+}
|
933ab1f8c792bf7ccfc7fdcef17699147ec47671
|
Dom Dwyer
|
2022-12-15 15:14:23
|
optimal persist parallelism
|
This commit changes the behaviour of the persist system to enable
optimal parallelism of persist operations, and improve the accuracy of
the outstanding job bound / back-pressure.
Previously all persist operations for a given partition were
consistently hashed to a single worker task. This serialised persistence
per partition, ensuring all updates to the partition sort key were
serialised. However, this also unnecessarily serialises persist
operations that do not need to update the sort key, reducing the
potential throughput of the system; in the worst case of a single
partition receiving all the writes, only one worker would be persisting,
and the other N-1 workers would be idle.
After this change, the sort key is inspected when enqueuing the persist
operation and if it can be determined that no sort key update is
necessary (the typical case), then the persist task is placed into a
global work queue from which all workers consume. This allows for
maximal parallelisation of these jobs, and removes the per-worker
head-of-line blocking.
In the case that the sort key does need updating, these jobs continue to
be consistently hashed to a single worker, ensuring serialised sort key
updates only where necessary.
To support these changes, the back-pressure system has been changed to
account for all outstanding persist jobs in the system, regardless of
type or assigned worker - a logical, bounded queue is composed together
of a semaphore limiting the number of persist tasks overall, and a
series of physical, unbounded queues - one to each worker & the global
queue. The overall system remains bounded by the
INFLUXDB_IOX_PERSIST_QUEUE_DEPTH value, and is now simpler to reason
about (it is independent of the number of workers, etc).
| null |
feat(ingester2): optimal persist parallelism
This commit changes the behaviour of the persist system to enable
optimal parallelism of persist operations, and improve the accuracy of
the outstanding job bound / back-pressure.
Previously all persist operations for a given partition were
consistently hashed to a single worker task. This serialised persistence
per partition, ensuring all updates to the partition sort key were
serialised. However, this also unnecessarily serialises persist
operations that do not need to update the sort key, reducing the
potential throughput of the system; in the worst case of a single
partition receiving all the writes, only one worker would be persisting,
and the other N-1 workers would be idle.
After this change, the sort key is inspected when enqueuing the persist
operation and if it can be determined that no sort key update is
necessary (the typical case), then the persist task is placed into a
global work queue from which all workers consume. This allows for
maximal parallelisation of these jobs, and removes the per-worker
head-of-line blocking.
In the case that the sort key does need updating, these jobs continue to
be consistently hashed to a single worker, ensuring serialised sort key
updates only where necessary.
To support these changes, the back-pressure system has been changed to
account for all outstanding persist jobs in the system, regardless of
type or assigned worker - a logical, bounded queue is composed of a
semaphore limiting the number of persist tasks overall, and a series of
physical, unbounded queues - one per worker, plus the global queue. The
overall system remains bounded by the
INFLUXDB_IOX_PERSIST_QUEUE_DEPTH value, and is now simpler to reason
about (it is independent of the number of workers, etc).
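The routing rule in this commit - sort key updates stay pinned to one worker, everything else goes to a shared queue - can be illustrated with the hedged sketch below. The names (`PersistJob`, `route`) and the plain modulo hash are assumptions for the example; the real system uses a consistent jump hash, but the property that matters is only that a given partition always maps to the same worker.
```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct PersistJob {
    partition_id: i64,
    needs_sort_key_update: bool,
}

enum Destination {
    /// Any worker may take this job, in parallel with any other job.
    Global,
    /// Only the worker with this index runs the job, serialising sort key
    /// updates for the partition.
    Worker(usize),
}

fn route(job: &PersistJob, n_workers: usize) -> Destination {
    if !job.needs_sort_key_update {
        // The typical case: no sort key update, so maximal parallelism.
        return Destination::Global;
    }
    let mut h = DefaultHasher::new();
    job.partition_id.hash(&mut h);
    Destination::Worker((h.finish() % n_workers as u64) as usize)
}

fn main() {
    let job = PersistJob {
        partition_id: 42,
        needs_sort_key_update: true,
    };
    match route(&job, 4) {
        Destination::Global => println!("enqueue into the global queue"),
        Destination::Worker(i) => println!("enqueue to worker {i}"),
    }
}
```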
|
diff --git a/Cargo.lock b/Cargo.lock
index b168c01a1c..10194cad3d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -346,6 +346,17 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
+[[package]]
+name = "async-channel"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833"
+dependencies = [
+ "concurrent-queue",
+ "event-listener",
+ "futures-core",
+]
+
[[package]]
name = "async-compression"
version = "0.3.15"
@@ -923,6 +934,15 @@ dependencies = [
"workspace-hack",
]
+[[package]]
+name = "concurrent-queue"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b"
+dependencies = [
+ "crossbeam-utils",
+]
+
[[package]]
name = "console"
version = "0.15.2"
@@ -2522,6 +2542,7 @@ dependencies = [
"arrow-flight",
"arrow_util",
"assert_matches",
+ "async-channel",
"async-trait",
"backoff",
"bytes",
diff --git a/clap_blocks/src/ingester2.rs b/clap_blocks/src/ingester2.rs
index f8341659de..7b511e3509 100644
--- a/clap_blocks/src/ingester2.rs
+++ b/clap_blocks/src/ingester2.rs
@@ -39,16 +39,15 @@ pub struct Ingester2Config {
)]
pub persist_max_parallelism: usize,
- /// The maximum number of persist tasks that can be queued for each worker.
+ /// The maximum number of persist tasks that can be queued at any one time.
///
- /// Note that each partition is consistently hashed to the same worker -
- /// this can cause uneven distribution of persist tasks across workers in
- /// workloads with skewed / hot partitions.
+ /// Once this limit is reached, ingest is blocked until the persist backlog
+ /// is reduced.
#[clap(
- long = "persist-worker-queue-depth",
- env = "INFLUXDB_IOX_PERSIST_WORKER_QUEUE_DEPTH",
+ long = "persist-queue-depth",
+ env = "INFLUXDB_IOX_PERSIST_QUEUE_DEPTH",
default_value = "250",
action
)]
- pub persist_worker_queue_depth: usize,
+ pub persist_queue_depth: usize,
}
diff --git a/ingester2/Cargo.toml b/ingester2/Cargo.toml
index 3d30334624..8cec746db3 100644
--- a/ingester2/Cargo.toml
+++ b/ingester2/Cargo.toml
@@ -9,6 +9,7 @@ license.workspace = true
arrow = { workspace = true, features = ["prettyprint"] }
arrow-flight = { workspace = true }
arrow_util = { version = "0.1.0", path = "../arrow_util" }
+async-channel = "1.8.0"
async-trait = "0.1.58"
backoff = { version = "0.1.0", path = "../backoff" }
bytes = "1.3.0"
diff --git a/ingester2/src/init.rs b/ingester2/src/init.rs
index 9bb01621af..be032b2ae9 100644
--- a/ingester2/src/init.rs
+++ b/ingester2/src/init.rs
@@ -154,7 +154,7 @@ pub async fn new(
wal_rotation_period: Duration,
persist_executor: Arc<Executor>,
persist_workers: usize,
- persist_worker_queue_depth: usize,
+ persist_queue_depth: usize,
object_store: ParquetStorage,
) -> Result<IngesterGuard<impl IngesterRpcInterface>, InitError> {
// Create the transition shard.
@@ -243,7 +243,7 @@ pub async fn new(
// Parquet files, and upload them to object storage.
let (persist_handle, persist_state) = PersistHandle::new(
persist_workers,
- persist_worker_queue_depth,
+ persist_queue_depth,
persist_executor,
object_store,
Arc::clone(&catalog),
diff --git a/ingester2/src/persist/backpressure.rs b/ingester2/src/persist/backpressure.rs
index a996d9e905..1a8f0084aa 100644
--- a/ingester2/src/persist/backpressure.rs
+++ b/ingester2/src/persist/backpressure.rs
@@ -10,7 +10,7 @@ use crossbeam_utils::CachePadded;
use observability_deps::tracing::*;
use parking_lot::Mutex;
use tokio::{
- sync::mpsc,
+ sync::Semaphore,
task::JoinHandle,
time::{Interval, MissedTickBehavior},
};
@@ -41,8 +41,8 @@ pub(crate) enum CurrentState {
/// * There are no outstanding enqueue operations (no thread is blocked adding
/// an item to any work queue).
///
-/// * All queues have at least half of their capacity free (being at most,
-/// half full).
+/// * The number of outstanding persist jobs is less than 50% of
+/// `persist_queue_depth`
///
/// These conditions are evaluated periodically, at the interval specified in
/// [`EVALUATE_SATURATION_INTERVAL`].
@@ -58,9 +58,9 @@ pub(crate) struct PersistState {
/// reads.
state: CachePadded<AtomicUsize>,
- /// Tracks the number of async tasks waiting within
- /// [`PersistHandle::queue_persist()`], asynchronously blocking to enqueue a
- /// persist job.
+ /// Tracks the number of [`WaitGuard`] instances, which in turn tracks the
+ /// number of async tasks waiting within [`PersistHandle::queue_persist()`]
+ /// to obtain a semaphore permit and enqueue a persist job.
///
/// This is modified using [`Ordering::SeqCst`] as performance is not a
/// priority for code paths that modify it.
@@ -69,24 +69,40 @@ pub(crate) struct PersistState {
/// super::handle::PersistHandle::queue_persist()
waiting_to_enqueue: Arc<AtomicUsize>,
+ /// The persist task semaphore with a maximum of `persist_queue_depth`
+ /// permits allocatable.
+ sem: Arc<Semaphore>,
+ persist_queue_depth: usize,
+
/// The handle to the current saturation evaluation/recovery task, if any.
recovery_handle: Mutex<Option<JoinHandle<()>>>,
}
-/// Initialise a [`PersistState`] with [`CurrentState::Ok`].
-impl Default for PersistState {
- fn default() -> Self {
+impl PersistState {
+ /// Initialise a [`PersistState`] with [`CurrentState::Ok`], with a total
+ /// number of tasks bounded to `persist_queue_depth` and permits issued from
+ /// `sem`.
+ pub(crate) fn new(persist_queue_depth: usize, sem: Arc<Semaphore>) -> Self {
+ // The persist_queue_depth should be the maximum number of permits
+ // available in the semaphore.
+ assert!(persist_queue_depth >= sem.available_permits());
+ // This makes no sense and later we divide by this value.
+ assert!(
+ persist_queue_depth > 0,
+ "persist queue depth must be non-zero"
+ );
+
let s = Self {
state: Default::default(),
waiting_to_enqueue: Arc::new(AtomicUsize::new(0)),
recovery_handle: Default::default(),
+ persist_queue_depth,
+ sem,
};
s.set(CurrentState::Ok);
s
}
-}
-impl PersistState {
/// Set the reported state of the [`PersistState`].
fn set(&self, s: CurrentState) -> bool {
// Set the new state, retaining the most recent state.
@@ -134,27 +150,25 @@ impl PersistState {
}
/// Mark the persist system as saturated, returning a [`WaitGuard`] that
- /// MUST be held during any subsequent async-blocking enqueue request
- /// ([`mpsc::Sender::send()`] and the like).
+ /// MUST be held during any subsequent async-blocking to acquire a permit
+ /// from the persist semaphore.
///
- /// Holding the guard over the `send()` await allows the saturation
+ /// Holding the guard over the `acquire()` await allows the saturation
/// evaluation to track the number of threads with an ongoing enqueue wait.
- pub(super) fn set_saturated<T>(s: Arc<Self>, persist_queues: Vec<mpsc::Sender<T>>) -> WaitGuard
- where
- T: Send + 'static,
- {
- // Increment the number of tasks waiting to push into a queue.
+ pub(super) fn set_saturated(s: Arc<Self>) -> WaitGuard {
+ // Increment the number of tasks waiting to obtain a permit and push
+ // into any queue.
//
// INVARIANT: this increment MUST happen-before returning the guard, and
- // waiting on the queue send(), and before starting the saturation
- // monitor task so that it observes this waiter.
+ // waiting on the semaphore acquire(), and before starting the
+ // saturation monitor task so that it observes this waiter.
let _ = s.waiting_to_enqueue.fetch_add(1, Ordering::SeqCst);
// Attempt to set the system to "saturated".
let first = s.set(CurrentState::Saturated);
if first {
// This is the first thread to mark the system as saturated.
- warn!("persist queues saturated, blocking ingest");
+ warn!("persist queue saturated, blocking ingest");
// Always check the state of the system EVALUATE_SATURATION_INTERVAL
// duration of time after the last completed evaluation - do not
@@ -163,12 +177,13 @@ impl PersistState {
let mut interval = tokio::time::interval(EVALUATE_SATURATION_INTERVAL);
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
- // Spawn a task that marks the system as not saturated after the queues
- // have processed some of the backlog.
+ // Spawn a task that marks the system as not saturated after the
+ // workers have processed some of the backlog.
let h = tokio::spawn(saturation_monitor_task(
interval,
Arc::clone(&s),
- persist_queues,
+ s.persist_queue_depth,
+ Arc::clone(&s.sem),
));
// Retain the task handle to avoid leaking it if dropped.
*s.recovery_handle.lock() = Some(h);
@@ -193,10 +208,10 @@ impl Drop for PersistState {
}
}
-/// A guard that decrements the number of writers waiting to enqueue an item
-/// into the persistence queue when dropped.
+/// A guard that decrements the number of writers waiting to obtain a permit
+/// from the persistence semaphore.
///
-/// This MUST be held whilst calling [`mpsc::Sender::send()`].
+/// This MUST be held whilst calling [`Semaphore::acquire()`].
#[must_use = "must hold wait guard while waiting for enqueue"]
pub(super) struct WaitGuard(Arc<AtomicUsize>);
@@ -206,19 +221,18 @@ impl Drop for WaitGuard {
}
}
-/// A task that monitors the `waiters` and `queues` to determine when the
-/// persist system is no longer saturated.
+/// A task that monitors the `waiters` and `sem` to determine when the persist
+/// system is no longer saturated.
///
/// Once the system is no longer saturated (as determined according to the
/// documentation for [`PersistState`]), the [`PersistState`] is set to
/// [`CurrentState::Ok`].
-async fn saturation_monitor_task<T>(
+async fn saturation_monitor_task(
mut interval: Interval,
state: Arc<PersistState>,
- queues: Vec<mpsc::Sender<T>>,
-) where
- T: Send,
-{
+ persist_queue_depth: usize,
+ sem: Arc<Semaphore>,
+) {
loop {
// Wait before evaluating the state of the system.
interval.tick().await;
@@ -226,40 +240,44 @@ async fn saturation_monitor_task<T>(
// INVARIANT: this task only ever runs when the system is saturated.
assert!(state.is_saturated());
- // First check if any tasks are waiting to enqueue an item (an
- // indication that one or more queues is full).
+ // First check if any tasks are waiting to obtain a permit and enqueue
+ // an item (an indication that one or more queues is full).
let n_waiting = state.waiting_to_enqueue.load(Ordering::SeqCst);
if n_waiting > 0 {
- debug!(
+ warn!(
n_waiting,
"waiting for outstanding persist jobs to be enqueued"
);
continue;
}
- // No async task WAS currently waiting to enqueue a persist job when
- // checking above, but one may want to immediately enqueue one now (or
- // later).
+ // No async task WAS currently waiting for a permit to enqueue a persist
+ // job when checking above, but one may want to immediately await one
+ // now (or later).
//
// In order to minimise health flip-flopping, only mark the persist
- // system as healthy once there is some capacity in the queues to accept
- // new persist jobs. This avoids a queue having 1 slot free, only to be
- // immediately filled and the system pause again.
+ // system as healthy once there is some capacity in the semaphore to
+ // accept new persist jobs. This avoids the semaphore having 1 permit
+ // free, only to be immediately acquired and the system pause again.
//
- // This check below ensures that all queues are at least half empty
+ // This check below ensures that the semaphore is at least half capacity
// before marking the system as recovered.
- let n_queues = queues
- .iter()
- .filter(|q| !has_sufficient_capacity(q.capacity(), q.max_capacity()))
- .count();
- if n_queues != 0 {
- debug!(n_queues, "waiting for queues to drain");
+ let available = sem.available_permits();
+ let outstanding = persist_queue_depth.checked_sub(available).unwrap();
+ if !has_sufficient_capacity(available, persist_queue_depth) {
+ warn!(
+ available,
+ outstanding, "waiting for outstanding persist jobs to reduce"
+ );
continue;
}
// There are no outstanding enqueue waiters, and all queues are at half
// capacity or better.
- info!("persist queue saturation reduced, resuming ingest");
+ info!(
+ available,
+ outstanding, "persist queue saturation reduced, resuming ingest"
+ );
// INVARIANT: there is only ever one task that monitors the queue state
// and transitions the persist state to OK, therefore this task is
@@ -279,12 +297,6 @@ fn has_sufficient_capacity(capacity: usize, max_capacity: usize) -> bool {
assert!(capacity <= max_capacity);
let want_at_least = (max_capacity + 1) / 2;
- trace!(
- available = capacity,
- max = max_capacity,
- want_at_least,
- "evaluating queue backlog"
- );
capacity >= want_at_least
}
@@ -297,6 +309,7 @@ mod tests {
use super::*;
+ const QUEUE_DEPTH: usize = 42;
const POLL_INTERVAL: Duration = Duration::from_millis(5);
#[test]
@@ -323,7 +336,8 @@ mod tests {
/// first thread that changes the state observes the "first=true" response.
#[test]
fn test_state_transitions() {
- let s = PersistState::default();
+ let sem = Arc::new(Semaphore::new(QUEUE_DEPTH));
+ let s = PersistState::new(QUEUE_DEPTH, sem);
assert_eq!(s.get(), CurrentState::Ok);
assert!(!s.is_saturated());
@@ -355,14 +369,15 @@ mod tests {
/// waiters (as tracked by the [`WaitGuard`]).
#[tokio::test]
async fn test_saturation_recovery_enqueue_waiters() {
- let s = Arc::new(PersistState::default());
+ let sem = Arc::new(Semaphore::new(QUEUE_DEPTH));
+ let s = Arc::new(PersistState::new(QUEUE_DEPTH, Arc::clone(&sem)));
// Use no queues to ensure only the waiters are blocking recovery.
assert!(!s.is_saturated());
- let w1 = PersistState::set_saturated::<()>(Arc::clone(&s), vec![]);
- let w2 = PersistState::set_saturated::<()>(Arc::clone(&s), vec![]);
+ let w1 = PersistState::set_saturated(Arc::clone(&s));
+ let w2 = PersistState::set_saturated(Arc::clone(&s));
assert!(s.is_saturated());
@@ -371,10 +386,11 @@ mod tests {
s.recovery_handle.lock().take().unwrap().abort();
// Spawn a replacement that ticks way more often to speed up the test.
- let h = tokio::spawn(saturation_monitor_task::<()>(
+ let h = tokio::spawn(saturation_monitor_task(
tokio::time::interval(POLL_INTERVAL),
Arc::clone(&s),
- vec![],
+ QUEUE_DEPTH,
+ sem,
));
// Drop a waiter and ensure the system is still saturated.
@@ -433,22 +449,17 @@ mod tests {
/// marking the system as healthy.
#[tokio::test]
async fn test_saturation_recovery_queue_capacity() {
- let s = Arc::new(PersistState::default());
-
- async fn fill(q: &mpsc::Sender<()>, times: usize) {
- for _ in 0..times {
- q.send(()).await.unwrap();
- }
- }
+ let sem = Arc::new(Semaphore::new(QUEUE_DEPTH));
+ let s = Arc::new(PersistState::new(QUEUE_DEPTH, Arc::clone(&sem)));
// Use no waiters to ensure only the queue slots are blocking recovery.
- let (tx1, mut rx1) = mpsc::channel(5);
- let (tx2, mut rx2) = mpsc::channel(5);
+ // Take half the permits. Holding this number of permits should allow
+ // the state to transition to healthy.
+ let _half_the_permits = sem.acquire_many(QUEUE_DEPTH as u32 / 2).await.unwrap();
- // Place some items in the queues
- fill(&tx1, 3).await; // Over the threshold of 5/2 = 2.5, rounded down to 2.
- fill(&tx2, 3).await; // Over the threshold of 5/2 = 2.5, rounded down to 2.
+ // Obtain a permit, pushing it over the "healthy" limit.
+ let permit = sem.acquire().await.unwrap();
assert!(!s.is_saturated());
assert!(s.set(CurrentState::Saturated));
@@ -457,10 +468,11 @@ mod tests {
// Spawn the recovery task directly, not via set_saturated() for
// simplicity - the test above asserts the task is started by a call to
// set_saturated().
- let h = tokio::spawn(saturation_monitor_task::<()>(
+ let h = tokio::spawn(saturation_monitor_task(
tokio::time::interval(POLL_INTERVAL),
Arc::clone(&s),
- vec![tx1, tx2],
+ QUEUE_DEPTH,
+ Arc::clone(&sem),
));
// Wait a little and ensure the state hasn't changed.
@@ -470,18 +482,8 @@ mod tests {
tokio::time::sleep(POLL_INTERVAL * 4).await;
assert!(s.is_saturated());
- // Drain one of the queues to below the saturation point.
- rx1.recv().await.expect("no recovery task running");
-
- // Wait a little and ensure the state still hasn't changed.
- //
- // While this could also be a false negative, if this assert fires there
- // is a legitimate problem.
- tokio::time::sleep(POLL_INTERVAL * 4).await;
- assert!(s.is_saturated());
-
- // Drain the remaining queue below the threshold for recovery.
- rx2.recv().await.expect("no recovery task running");
+ // Drop the permit so that the outstanding permits drops below the threshold for recovery.
+ drop(permit);
// Wait up to 5 seconds to observe the system recovery.
async {
diff --git a/ingester2/src/persist/context.rs b/ingester2/src/persist/context.rs
index 15c41af88a..8dab09823f 100644
--- a/ingester2/src/persist/context.rs
+++ b/ingester2/src/persist/context.rs
@@ -11,7 +11,10 @@ use observability_deps::tracing::*;
use parking_lot::Mutex;
use parquet_file::metadata::IoxMetadata;
use schema::sort::SortKey;
-use tokio::{sync::oneshot, time::Instant};
+use tokio::{
+ sync::{oneshot, OwnedSemaphorePermit},
+ time::Instant,
+};
use uuid::Uuid;
use crate::{
@@ -36,6 +39,7 @@ pub(super) struct PersistRequest {
partition: Arc<Mutex<PartitionData>>,
data: PersistingData,
enqueued_at: Instant,
+ permit: OwnedSemaphorePermit,
}
impl PersistRequest {
@@ -44,6 +48,7 @@ impl PersistRequest {
pub(super) fn new(
partition: Arc<Mutex<PartitionData>>,
data: PersistingData,
+ permit: OwnedSemaphorePermit,
enqueued_at: Instant,
) -> (Self, oneshot::Receiver<()>) {
let (tx, rx) = oneshot::channel();
@@ -53,6 +58,7 @@ impl PersistRequest {
partition,
data,
enqueued_at,
+ permit,
},
rx,
)
@@ -104,6 +110,13 @@ pub(super) struct Context {
/// the queue).
enqueued_at: Instant,
dequeued_at: Instant,
+
+ /// The persistence permit for this work.
+ ///
+ /// This permit MUST be retained for the entire duration of the persistence
+ /// work, and MUST be released at the end of the persistence AFTER any
+ /// references to the persisted data are released.
+ permit: OwnedSemaphorePermit,
}
impl Context {
@@ -122,6 +135,7 @@ impl Context {
partition,
data,
enqueued_at,
+ permit,
} = req;
let p = Arc::clone(&partition);
@@ -147,6 +161,7 @@ impl Context {
complete,
enqueued_at,
dequeued_at: Instant::now(),
+ permit,
}
};
@@ -399,6 +414,11 @@ impl Context {
"persisted partition"
);
+ // Explicitly drop the permit before notifying the caller, so that if
+ // there's no headroom in the queue, the caller that is woken by the
+ // notification is able to push into the queue immediately.
+ drop(self.permit);
+
// Notify the observer of this persistence task, if any.
let _ = self.complete.send(());
}
diff --git a/ingester2/src/persist/handle.rs b/ingester2/src/persist/handle.rs
index e3e6abcbf6..b78e1b8bd7 100644
--- a/ingester2/src/persist/handle.rs
+++ b/ingester2/src/persist/handle.rs
@@ -1,20 +1,19 @@
use std::sync::Arc;
+use async_channel::RecvError;
use iox_catalog::interface::Catalog;
-use iox_query::exec::Executor;
+use iox_query::{exec::Executor, QueryChunkMeta};
use observability_deps::tracing::*;
use parking_lot::Mutex;
use parquet_file::storage::ParquetStorage;
+use schema::sort::adjust_sort_key_columns;
use sharder::JumpHash;
use tokio::{
- sync::{
- mpsc::{self, error::TrySendError},
- oneshot,
- },
+ sync::{mpsc, oneshot, Semaphore, TryAcquireError},
time::Instant,
};
-use crate::buffer_tree::partition::{persisting::PersistingData, PartitionData};
+use crate::buffer_tree::partition::{persisting::PersistingData, PartitionData, SortKeyState};
use super::{
backpressure::PersistState,
@@ -33,15 +32,15 @@ use super::{
/// The [`PersistHandle`] can be cheaply cloned and passed over thread/task
/// boundaries to enqueue persist tasks in parallel.
///
-/// Dropping all [`PersistHandle`] instances immediately stops all workers.
+/// Dropping all [`PersistHandle`] instances immediately stops all persist
+/// workers and drops all outstanding persist tasks.
///
/// # Topology
///
-/// The [`PersistHandle`] uses an internal work group to parallelise persistence
-/// operations up to `n_workers` number of parallel tasks.
-///
-/// Submitting a persistence request selects a worker for the persist job and
-/// places the job into a bounded queue (of up to `worker_queue_depth` items).
+/// The persist system exposes a logical queue of persist tasks with up to
+/// `persist_queue_depth` slots. Each persist task is executed on a worker in
+/// the worker pool in order to parallelise persistence operations up to
+/// `n_workers` number of parallel tasks.
///
/// ```text
/// βββββββββββββββ
@@ -50,12 +49,11 @@ use super::{
/// ββ¬ββββββββββββββ
/// βββββββ¬ββββββββ
/// β
+/// persist_queue_depth
+/// β
/// β
/// ββββββββββββββββββΌβββββββββββββββββ
/// β β β
-/// β β β
-/// worker_queue_depth worker_queue_depth worker_queue_depth
-/// β β β
/// βΌ βΌ βΌ
/// ββββββββββββββ ββββββββββββββ ββββββββββββββ
/// β Worker 1 β β Worker 2 β β Worker N β
@@ -68,19 +66,18 @@ use super::{
/// β β β β β β β
/// ```
///
+/// Internally, several queues are used, and reordering of persist tasks is
+/// allowed for efficiency / performance reasons, allowing maximal
+/// parallelisation of work given the constraints of an individual persist job.
+///
/// Compaction is performed in the provided [`Executor`] re-org thread-pool and
/// is shared across all workers.
///
-/// At any one time, the number of outstanding persist tasks is bounded by:
-///
-/// ```text
-///
-/// workers * worker_queue_depth
-///
-/// ```
-///
-/// At any one time, there may be at most `worker_queue_depth` number of
-/// outstanding persist jobs for a single partition or worker.
+/// While the logical queue bounds the number of outstanding persist tasks to at
+/// most `persist_queue_depth`, once this limit is reached threads attempting to
+/// push into the queue block for a free slot to become available in the queue -
+/// this can increase the number of outstanding jobs in the system beyond
+/// `persist_queue_depth` - see "Overload & Back-pressure" below.
///
/// # Parallelism & Partition Serialisation
///
@@ -95,17 +92,17 @@ use super::{
///
/// # Overload & Back-pressure
///
-/// The persist queue is bounded, but the caller must prevent new persist jobs
-/// from being generated and blocked whilst waiting to add the persist job to
-/// the bounded queue, otherwise the system is effectively unbounded. If an
-/// unbounded number of threads block on [`PersistHandle::queue_persist()`]
-/// waiting to successfully enqueue the job, then there is no bound on
-/// outstanding persist jobs at all.
+/// The logical persist queue is bounded, but the caller must prevent new
+/// persist jobs from being generated and blocked whilst waiting to add the
+/// persist job to the bounded queue, otherwise the system is effectively
+/// unbounded. If an unbounded number of threads block on
+/// [`PersistHandle::queue_persist()`] waiting to successfully enqueue the job,
+/// then there is no bound on outstanding persist jobs at all.
///
-/// To prevent this, the persistence system exposes an indicator of saturation
-/// (readable via the [`PersistState`]) that the caller MUST use to prevent the
-/// generation of new persist tasks (for example, by blocking any further
-/// ingest) on a best-effort basis.
+/// To enforce the bound, the persistence system exposes an indicator of
+/// saturation (readable via the [`PersistState`]) that the caller MUST use to
+/// prevent the generation of new persist tasks on a best-effort basis (for
+/// example; by blocking any further ingest).
///
/// When the persist queue is saturated, the [`PersistState::is_saturated()`]
/// returns true. Once the backlog of persist jobs is reduced, the
@@ -119,16 +116,38 @@ pub(crate) struct PersistHandle {
/// The state/dependencies shared across all worker tasks.
inner: Arc<Inner>,
- /// A consistent hash implementation used to consistently map buffers from
- /// one partition to the same worker queue.
- ///
- /// This ensures persistence is serialised per-partition, but in parallel
- /// across partitions (up to the number of worker tasks).
- persist_queues: Arc<JumpHash<mpsc::Sender<PersistRequest>>>,
-
/// Task handles for the worker tasks, aborted on drop of all
/// [`PersistHandle`] instances.
- tasks: Arc<Vec<AbortOnDrop<()>>>,
+ worker_tasks: Arc<Vec<AbortOnDrop<()>>>,
+
+ /// While the persistence system exposes the concept of a "persistence
+ /// queue" externally, it is actually a set of per-worker queues, and the
+ /// global task queue.
+ ///
+ /// Each individual queue is unbounded, with the logical "persistence queue"
+ /// bound by a semaphore to limit the number of global outstanding persist
+ /// operations.
+
+ /// The persist task semaphore that bounds the number of outstanding persist
+ /// operations across all queues.
+ ///
+ /// Before enqueuing an item into any of the (unbounded) worker queues, or
+ /// (unbounded) global queue, the caller MUST obtain a semaphore permit.
+ sem: Arc<Semaphore>,
+
+ /// A global queue of persist tasks that may be executed on any worker in
+ /// parallel with any other persist task.
+ ///
+ /// For correctness, only persist operations that do not cause a sort key
+ /// update should be enqueued in the global work queue.
+ global_queue: async_channel::Sender<PersistRequest>,
+
+ /// A consistent hash implementation used to consistently map persist tasks
+ /// from one partition to the same worker queue.
+ ///
+ /// These queues are used for persist tasks that modify the partition's sort
+ /// key, ensuring sort key updates are serialised per-partition.
+ worker_queues: Arc<JumpHash<mpsc::UnboundedSender<PersistRequest>>>,
/// Records the saturation state of the persist system.
persist_state: Arc<PersistState>,
@@ -138,21 +157,19 @@ impl PersistHandle {
/// Initialise a new persist actor & obtain the first handle.
pub(crate) fn new(
n_workers: usize,
- worker_queue_depth: usize,
+ persist_queue_depth: usize,
exec: Arc<Executor>,
store: ParquetStorage,
catalog: Arc<dyn Catalog>,
) -> (Self, Arc<PersistState>) {
assert_ne!(n_workers, 0, "must run at least 1 persist worker");
- assert_ne!(worker_queue_depth, 0, "worker queue depth must be non-zero");
+ assert_ne!(
+ persist_queue_depth, 0,
+ "persist queue depth must be non-zero"
+ );
// Log the important configuration parameters of the persist subsystem.
- info!(
- n_workers,
- worker_queue_depth,
- max_queued_tasks = (n_workers * worker_queue_depth),
- "initialised persist task"
- );
+ info!(n_workers, persist_queue_depth, "initialised persist task");
let inner = Arc::new(Inner {
exec,
@@ -160,24 +177,43 @@ impl PersistHandle {
catalog,
});
+ // Initialise the global queue.
+ //
+ // Persist tasks that do not require a sort key update are enqueued into
+ // this queue, from which all workers consume.
+ let (global_tx, global_rx) = async_channel::unbounded();
+
let (tx_handles, tasks): (Vec<_>, Vec<_>) = (0..n_workers)
.map(|_| {
let inner = Arc::clone(&inner);
- let (tx, rx) = mpsc::channel(worker_queue_depth);
- (tx, AbortOnDrop(tokio::spawn(run_task(inner, rx))))
+
+ // Initialise the worker queue that is not shared across workers
+ // allowing the persist code to address a single worker.
+ let (tx, rx) = mpsc::unbounded_channel();
+ (
+ tx,
+ AbortOnDrop(tokio::spawn(run_task(inner, global_rx.clone(), rx))),
+ )
})
.unzip();
assert!(!tasks.is_empty());
- // Initialise the saturation state as "not saturated".
- let persist_state = Default::default();
+ // Initialise the semaphore that bounds the total number of persist jobs
+ // in the system.
+ let sem = Arc::new(Semaphore::new(persist_queue_depth));
+
+ // Initialise the saturation state as "not saturated" and provide it
+ // with the task semaphore and total permit count.
+ let persist_state = Arc::new(PersistState::new(persist_queue_depth, Arc::clone(&sem)));
(
Self {
inner,
- persist_queues: Arc::new(JumpHash::new(tx_handles)),
- tasks: Arc::new(tasks),
+ sem,
+ global_queue: global_tx,
+ worker_queues: Arc::new(JumpHash::new(tx_handles)),
+ worker_tasks: Arc::new(tasks),
persist_state: Arc::clone(&persist_state),
},
persist_state,
@@ -187,15 +223,19 @@ impl PersistHandle {
/// Place `data` from `partition` into the persistence queue.
///
/// This call (asynchronously) waits for space to become available in the
- /// assigned worker queue.
+ /// persistence queue.
///
/// Once persistence is complete, the partition will be locked and the sort
- /// key will be updated, and [`PartitionData::mark_persisted()`] is called
- /// with `data`.
+ /// key will be updated (if necessary), and
+ /// [`PartitionData::mark_persisted()`] is called with `data` to mark the
+ /// task as complete.
///
/// Once all persistence related tasks for `data` are complete, the returned
/// channel publishes a notification.
///
+ /// Persist tasks may be re-ordered w.r.t their submission order for
+ /// performance reasons.
+ ///
/// # Panics
///
/// Panics if the assigned persist worker task has stopped.
@@ -209,49 +249,117 @@ impl PersistHandle {
partition: Arc<Mutex<PartitionData>>,
data: PersistingData,
) -> oneshot::Receiver<()> {
- debug!(
- partition_id = data.partition_id().get(),
- "enqueuing persistence task"
- );
+ let partition_id = data.partition_id().get();
+ debug!(partition_id, "enqueuing persistence task");
- // Build the persist task request.
let enqueued_at = Instant::now();
- let (r, notify) = PersistRequest::new(partition, data, enqueued_at);
-
- // Select a worker to dispatch this request to.
- let queue = self.persist_queues.hash(r.partition_id());
-
- // Try and enqueue the persist task immediately.
- match queue.try_send(r) {
- Ok(()) => {} // Success!
- Err(TrySendError::Closed(_)) => panic!("persist worker has stopped"),
- Err(TrySendError::Full(r)) => {
- // The worker's queue is full. Mark the persist system as being
- // saturated, requiring some time to clear outstanding persist
- // operations.
+
+ // Try and acquire the persist task permit immediately.
+ let permit = match Arc::clone(&self.sem).try_acquire_owned() {
+ Ok(p) => p, // Success!
+ Err(TryAcquireError::Closed) => panic!("persist work semaphore is closed"),
+ Err(TryAcquireError::NoPermits) => {
+ // The persist system is saturated. Mark the persist system as
+ // being saturated to observers.
//
- // The returned guard MUST be held during the send() await
- // below.
- let _guard = PersistState::set_saturated(
- Arc::clone(&self.persist_state),
- self.persist_queues.shards().to_owned(),
- );
+ // The returned guard MUST be held during the acquire_owned()
+ // await below.
+ let _guard = PersistState::set_saturated(Arc::clone(&self.persist_state));
// TODO(test): the guard is held over the await point below
- // Park this task waiting to enqueue the persist whilst holding
+ // Park this task waiting to obtain the permit whilst holding
// the guard above.
//
- // If this send() is aborted, the guard is dropped and the
- // number of waiters is decremented. If the send() is
- // successful, the guard is dropped immediately when leaving
- // this scope.
- queue.send(r).await.expect("persist worker stopped");
+ // If this acquire_owned() is aborted, the guard is dropped and
+ // the number of waiters is decremented. If the acquire_owned()
+ // is successful, the guard is dropped immediately when leaving
+ // this scope, after the permit has been granted.
+
+ Arc::clone(&self.sem)
+ .acquire_owned()
+ .await
+ .expect("persist work semaphore is closed")
}
};
+ // If the persist job has a known sort key, and it can be determined
+ // that the persist job does not require updating that sort key, it can
+ // be enqueued in the global queue and executed on any worker.
+ //
+ // Conversely if the sort key is not yet known (unresolved deferred
+ // load) do not wait in this handle to resolve it, and instead
+ // pessimistically enqueue the task into a specific worker queue by
+ // consistently hashing the partition ID, serialising the execution of
+ // the persist job w.r.t other persist jobs for the same partition. It
+ // is still executed in parallel with other persist jobs for other
+ // partitions.
+ //
+ // Likewise if it can be determined that a sort key is necessary, it
+ // must be serialised via the same mechanism.
+ //
+ // Do NOT attempt to fetch the sort key in this handler, as doing so
+ // would cause a large number of requests against the catalog in a short
+ // period of time (bounded by the number of tasks being enqueued).
+ // Instead, the workers fetch the sort key, bounding the number of
+ // queries to at most `n_workers`.
+
+ let sort_key = match partition.lock().sort_key() {
+ SortKeyState::Deferred(v) => v.peek().flatten(),
+ SortKeyState::Provided(v) => v.as_ref().cloned(),
+ };
+
+ // Build the persist task request.
+ let schema = data.schema();
+ let (r, notify) = PersistRequest::new(Arc::clone(&partition), data, permit, enqueued_at);
+
+ match sort_key {
+ Some(v) => {
+ // A sort key is known for this partition. If it can be shown
+ // that this persist job does not require a sort key update, it
+ // can be parallelised with impunity.
+ let data_primary_key = schema.primary_key();
+ if let Some(new_sort_key) = adjust_sort_key_columns(&v, &data_primary_key).1 {
+ // This persist operation will require a sort key update.
+ trace!(
+ partition_id,
+ old_sort_key = %v,
+ %new_sort_key,
+ "persist job will require sort key update"
+ );
+ self.assign_worker(r);
+ } else {
+ // This persist operation will not require a sort key
+ // update.
+ debug!(partition_id, "enqueue persist job to global work queue");
+ self.global_queue.send(r).await.expect("no persist workers");
+ }
+ }
+ None => {
+ // If no sort key is known (either because it was unresolved, or
+ // not yet set), the task must be serialised w.r.t other persist
+ // jobs for the same partition.
+ trace!(partition_id, "persist job has no known sort key");
+ self.assign_worker(r);
+ }
+ }
+
notify
}
+
+ fn assign_worker(&self, r: PersistRequest) {
+ debug!(
+ partition_id = r.partition_id().get(),
+ "enqueue persist job to assigned worker"
+ );
+
+ // Consistently map partition tasks for this partition ID to the
+ // same worker.
+ self.worker_queues
+ .hash(r.partition_id())
+ .send(r)
+ .expect("persist worker stopped");
+ }
}
#[derive(Debug)]
@@ -270,8 +378,44 @@ pub(super) struct Inner {
pub(super) catalog: Arc<dyn Catalog>,
}
-async fn run_task(inner: Arc<Inner>, mut rx: mpsc::Receiver<PersistRequest>) {
- while let Some(req) = rx.recv().await {
+async fn run_task(
+ inner: Arc<Inner>,
+ global_queue: async_channel::Receiver<PersistRequest>,
+ mut rx: mpsc::UnboundedReceiver<PersistRequest>,
+) {
+ loop {
+ let req = tokio::select! {
+ // Bias the channel polling to prioritise work in the
+ // worker-specific queue.
+ //
+ // This causes the worker to do the work assigned to it specifically
+ // first, falling back to taking jobs from the global queue if it
+ // has no assigned work.
+ //
+ // This allows persist jobs to be reordered w.r.t the order in which
+ // they were enqueued with queue_persist().
+ biased;
+
+ v = rx.recv() => {
+ match v {
+ Some(v) => v,
+ None => {
+ // The worker channel is closed.
+ return
+ }
+ }
+ }
+ v = global_queue.recv() => {
+ match v {
+ Ok(v) => v,
+ Err(RecvError) => {
+ // The global channel is closed.
+ return
+ },
+ }
+ }
+ };
+
let ctx = Context::new(req, Arc::clone(&inner));
let compacted = ctx.compact().await;
@@ -280,3 +424,392 @@ async fn run_task(inner: Arc<Inner>, mut rx: mpsc::Receiver<PersistRequest>) {
.await;
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::{sync::Arc, time::Duration};
+
+ use assert_matches::assert_matches;
+ use data_types::{NamespaceId, PartitionId, PartitionKey, TableId};
+ use dml::DmlOperation;
+ use iox_catalog::mem::MemCatalog;
+ use lazy_static::lazy_static;
+ use object_store::memory::InMemory;
+ use parquet_file::storage::StorageId;
+ use schema::sort::SortKey;
+ use test_helpers::timeout::FutureTimeout;
+ use tokio::sync::mpsc::error::TryRecvError;
+
+ use crate::{
+ buffer_tree::{
+ namespace::{name_resolver::mock::MockNamespaceNameProvider, NamespaceName},
+ partition::resolver::mock::MockPartitionProvider,
+ table::{name_resolver::mock::MockTableNameProvider, TableName},
+ BufferTree,
+ },
+ deferred_load::DeferredLoad,
+ dml_sink::DmlSink,
+ test_util::make_write_op,
+ };
+
+ use super::*;
+
+ const PARTITION_ID: PartitionId = PartitionId::new(42);
+ const NAMESPACE_ID: NamespaceId = NamespaceId::new(24);
+ const TABLE_ID: TableId = TableId::new(2442);
+ const TABLE_NAME: &str = "banana-report";
+ const NAMESPACE_NAME: &str = "platanos";
+
+ lazy_static! {
+ static ref EXEC: Arc<Executor> = Arc::new(Executor::new_testing());
+ static ref PARTITION_KEY: PartitionKey = PartitionKey::from("bananas");
+ static ref NAMESPACE_NAME_LOADER: Arc<DeferredLoad<NamespaceName>> =
+ Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ NamespaceName::from(NAMESPACE_NAME)
+ }));
+ static ref TABLE_NAME_LOADER: Arc<DeferredLoad<TableName>> =
+ Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ TableName::from(TABLE_NAME)
+ }));
+ }
+
+ /// Construct a partition with the above constants, with the given sort key,
+ /// and containing a single write.
+ async fn new_partition(
+ partition_id: PartitionId,
+ sort_key: SortKeyState,
+ ) -> Arc<Mutex<PartitionData>> {
+ let buffer_tree = BufferTree::new(
+ Arc::new(MockNamespaceNameProvider::new(NAMESPACE_NAME)),
+ Arc::new(MockTableNameProvider::new(TABLE_NAME)),
+ Arc::new(
+ MockPartitionProvider::default().with_partition(PartitionData::new(
+ partition_id,
+ PARTITION_KEY.clone(),
+ NAMESPACE_ID,
+ Arc::clone(&NAMESPACE_NAME_LOADER),
+ TABLE_ID,
+ Arc::clone(&TABLE_NAME_LOADER),
+ sort_key,
+ )),
+ ),
+ Default::default(),
+ );
+
+ buffer_tree
+ .apply(DmlOperation::Write(make_write_op(
+ &PARTITION_KEY,
+ NAMESPACE_ID,
+ TABLE_NAME,
+ TABLE_ID,
+ 0,
+ r#"banana-report,good=yes level=1000 4242424242"#,
+ )))
+ .await
+ .expect("failed to write partition test dataa");
+
+ Arc::clone(&buffer_tree.partitions().next().unwrap())
+ }
+
+ /// A test that ensures the correct destination of a partition that has no
+ /// assigned sort key yet (a new partition) that was directly assigned. This
+ /// will need a sort key update.
+ #[tokio::test]
+ async fn test_persist_sort_key_provided_none() {
+ let storage = ParquetStorage::new(Arc::new(InMemory::default()), StorageId::from("iox"));
+ let metrics = Arc::new(metric::Registry::default());
+ let catalog = Arc::new(MemCatalog::new(metrics));
+
+ let (mut handle, state) = PersistHandle::new(1, 2, Arc::clone(&EXEC), storage, catalog);
+ assert!(!state.is_saturated());
+
+ // Kill the workers, and replace the queues so we can inspect the
+ // enqueue output.
+ handle.worker_tasks = Arc::new(vec![]);
+
+ let (global_tx, _global_rx) = async_channel::unbounded();
+ handle.global_queue = global_tx;
+
+ let (worker1_tx, mut worker1_rx) = mpsc::unbounded_channel();
+ let (worker2_tx, mut worker2_rx) = mpsc::unbounded_channel();
+ handle.worker_queues = Arc::new(JumpHash::new([worker1_tx, worker2_tx]));
+
+ // Generate a partition with no known sort key.
+ let p = new_partition(PARTITION_ID, SortKeyState::Provided(None)).await;
+ let data = p.lock().mark_persisting().unwrap();
+
+ // Enqueue it
+ let notify = handle.queue_persist(p, data).await;
+
+ // And assert it wound up in a worker queue.
+ assert!(handle.global_queue.is_empty());
+
+ // Remember which queue it wound up in.
+ let mut assigned_worker = &mut worker1_rx;
+ let msg = match assigned_worker.try_recv() {
+ Ok(v) => v,
+ Err(TryRecvError::Disconnected) => panic!("worker channel is closed"),
+ Err(TryRecvError::Empty) => {
+ assigned_worker = &mut worker2_rx;
+ assigned_worker
+ .try_recv()
+ .expect("message was not found in either worker")
+ }
+ };
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+
+ // Drop the message, and ensure the notification becomes inactive.
+ drop(msg);
+ assert_matches!(
+ notify.with_timeout_panic(Duration::from_secs(5)).await,
+ Err(_)
+ );
+
+ // Enqueue another partition for the same ID.
+ let p = new_partition(PARTITION_ID, SortKeyState::Provided(None)).await;
+ let data = p.lock().mark_persisting().unwrap();
+
+ // Enqueue it
+ let _notify = handle.queue_persist(p, data).await;
+
+ // And ensure it was mapped to the same worker.
+ let msg = assigned_worker
+ .try_recv()
+ .expect("message was not found in either worker");
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+ }
+
+ /// A test that ensures the correct destination of a partition that has no
+ /// assigned sort key yet (a new partition) that was resolved by the
+ /// deferred load. This will need a sort key update.
+ #[tokio::test]
+ async fn test_persist_sort_key_deferred_resolved_none_update_necessary() {
+ let storage = ParquetStorage::new(Arc::new(InMemory::default()), StorageId::from("iox"));
+ let metrics = Arc::new(metric::Registry::default());
+ let catalog = Arc::new(MemCatalog::new(metrics));
+
+ let (mut handle, state) = PersistHandle::new(1, 2, Arc::clone(&EXEC), storage, catalog);
+ assert!(!state.is_saturated());
+
+ // Kill the workers, and replace the queues so we can inspect the
+ // enqueue output.
+ handle.worker_tasks = Arc::new(vec![]);
+
+ let (global_tx, _global_rx) = async_channel::unbounded();
+ handle.global_queue = global_tx;
+
+ let (worker1_tx, mut worker1_rx) = mpsc::unbounded_channel();
+ let (worker2_tx, mut worker2_rx) = mpsc::unbounded_channel();
+ handle.worker_queues = Arc::new(JumpHash::new([worker1_tx, worker2_tx]));
+
+ // Generate a partition with a resolved, but empty sort key.
+ let p = new_partition(
+ PARTITION_ID,
+ SortKeyState::Deferred(Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ None
+ }))),
+ )
+ .await;
+ let (loader, data) = {
+ let mut p = p.lock();
+ (p.sort_key().clone(), p.mark_persisting().unwrap())
+ };
+ // Ensure the key is resolved.
+ assert_matches!(loader.get().await, None);
+
+ // Enqueue it
+ let notify = handle.queue_persist(p, data).await;
+
+ // And assert it wound up in a worker queue.
+ assert!(handle.global_queue.is_empty());
+
+ // Remember which queue it wound up in.
+ let mut assigned_worker = &mut worker1_rx;
+ let msg = match assigned_worker.try_recv() {
+ Ok(v) => v,
+ Err(TryRecvError::Disconnected) => panic!("worker channel is closed"),
+ Err(TryRecvError::Empty) => {
+ assigned_worker = &mut worker2_rx;
+ assigned_worker
+ .try_recv()
+ .expect("message was not found in either worker")
+ }
+ };
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+
+ // Drop the message, and ensure the notification becomes inactive.
+ drop(msg);
+ assert_matches!(
+ notify.with_timeout_panic(Duration::from_secs(5)).await,
+ Err(_)
+ );
+
+ // Enqueue another partition for the same ID and same (resolved)
+ // deferred load instance.
+ let p = new_partition(PARTITION_ID, loader).await;
+ let data = p.lock().mark_persisting().unwrap();
+
+ // Enqueue it
+ let _notify = handle.queue_persist(p, data).await;
+
+ // And ensure it was mapped to the same worker.
+ let msg = assigned_worker
+ .try_recv()
+ .expect("message was not found in either worker");
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+ }
+
+ /// A test that ensures the correct destination of a partition that has an
+ /// assigned sort key, but the data differs and a sort key update is
+ /// necessary.
+ #[tokio::test]
+ async fn test_persist_sort_key_deferred_resolved_some_update_necessary() {
+ let storage = ParquetStorage::new(Arc::new(InMemory::default()), StorageId::from("iox"));
+ let metrics = Arc::new(metric::Registry::default());
+ let catalog = Arc::new(MemCatalog::new(metrics));
+
+ let (mut handle, state) = PersistHandle::new(1, 2, Arc::clone(&EXEC), storage, catalog);
+ assert!(!state.is_saturated());
+
+ // Kill the workers, and replace the queues so we can inspect the
+ // enqueue output.
+ handle.worker_tasks = Arc::new(vec![]);
+
+ let (global_tx, _global_rx) = async_channel::unbounded();
+ handle.global_queue = global_tx;
+
+ let (worker1_tx, mut worker1_rx) = mpsc::unbounded_channel();
+ let (worker2_tx, mut worker2_rx) = mpsc::unbounded_channel();
+ handle.worker_queues = Arc::new(JumpHash::new([worker1_tx, worker2_tx]));
+
+ // Generate a partition with a resolved sort key that does not reflect
+ // the data within the partition's buffer.
+ let p = new_partition(
+ PARTITION_ID,
+ SortKeyState::Deferred(Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ Some(SortKey::from_columns(["time", "some-other-column"]))
+ }))),
+ )
+ .await;
+ let (loader, data) = {
+ let mut p = p.lock();
+ (p.sort_key().clone(), p.mark_persisting().unwrap())
+ };
+ // Ensure the key is resolved.
+ assert_matches!(loader.get().await, Some(_));
+
+ // Enqueue it
+ let notify = handle.queue_persist(p, data).await;
+
+ // And assert it wound up in a worker queue.
+ assert!(handle.global_queue.is_empty());
+
+ // Remember which queue it wound up in.
+ let mut assigned_worker = &mut worker1_rx;
+ let msg = match assigned_worker.try_recv() {
+ Ok(v) => v,
+ Err(TryRecvError::Disconnected) => panic!("worker channel is closed"),
+ Err(TryRecvError::Empty) => {
+ assigned_worker = &mut worker2_rx;
+ assigned_worker
+ .try_recv()
+ .expect("message was not found in either worker")
+ }
+ };
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+
+ // Drop the message, and ensure the notification becomes inactive.
+ drop(msg);
+ assert_matches!(
+ notify.with_timeout_panic(Duration::from_secs(5)).await,
+ Err(_)
+ );
+
+ // Enqueue another partition for the same ID and same (resolved)
+ // deferred load instance.
+ let p = new_partition(PARTITION_ID, loader).await;
+ let data = p.lock().mark_persisting().unwrap();
+
+ // Enqueue it
+ let _notify = handle.queue_persist(p, data).await;
+
+ // And ensure it was mapped to the same worker.
+ let msg = assigned_worker
+ .try_recv()
+ .expect("message was not found in either worker");
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+ }
+
+ /// A test that a partition that does not require a sort key update is
+ /// enqueued into the global queue.
+ #[tokio::test]
+ async fn test_persist_sort_key_no_update_necessary() {
+ let storage = ParquetStorage::new(Arc::new(InMemory::default()), StorageId::from("iox"));
+ let metrics = Arc::new(metric::Registry::default());
+ let catalog = Arc::new(MemCatalog::new(metrics));
+
+ let (mut handle, state) = PersistHandle::new(1, 2, Arc::clone(&EXEC), storage, catalog);
+ assert!(!state.is_saturated());
+
+ // Kill the workers, and replace the queues so we can inspect the
+ // enqueue output.
+ handle.worker_tasks = Arc::new(vec![]);
+
+ let (global_tx, global_rx) = async_channel::unbounded();
+ handle.global_queue = global_tx;
+
+ let (worker1_tx, mut worker1_rx) = mpsc::unbounded_channel();
+ let (worker2_tx, mut worker2_rx) = mpsc::unbounded_channel();
+ handle.worker_queues = Arc::new(JumpHash::new([worker1_tx, worker2_tx]));
+
+ // Generate a partition with a resolved sort key that does not reflect
+ // the data within the partition's buffer.
+ let p = new_partition(
+ PARTITION_ID,
+ SortKeyState::Deferred(Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ Some(SortKey::from_columns(["time", "good"]))
+ }))),
+ )
+ .await;
+ let (loader, data) = {
+ let mut p = p.lock();
+ (p.sort_key().clone(), p.mark_persisting().unwrap())
+ };
+ // Ensure the key is resolved.
+ assert_matches!(loader.get().await, Some(_));
+
+ // Enqueue it
+ let notify = handle.queue_persist(p, data).await;
+
+ // Assert the task did not get enqueued in a worker
+ assert_matches!(worker1_rx.try_recv(), Err(TryRecvError::Empty));
+ assert_matches!(worker2_rx.try_recv(), Err(TryRecvError::Empty));
+
+ // And assert it wound up in the global queue.
+ let msg = global_rx
+ .try_recv()
+ .expect("task should be in global queue");
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+
+ // Drop the message, and ensure the notification becomes inactive.
+ drop(msg);
+ assert_matches!(
+ notify.with_timeout_panic(Duration::from_secs(5)).await,
+ Err(_)
+ );
+
+ // Enqueue another partition for the same ID and same (resolved)
+ // deferred load instance.
+ let p = new_partition(PARTITION_ID, loader).await;
+ let data = p.lock().mark_persisting().unwrap();
+
+ // Enqueue it
+ let _notify = handle.queue_persist(p, data).await;
+
+ // And ensure it was mapped to the same worker.
+ let msg = global_rx
+ .try_recv()
+ .expect("task should be in global queue");
+ assert_eq!(msg.partition_id(), PARTITION_ID);
+ }
+}
diff --git a/ingester2/src/server/grpc/rpc_write.rs b/ingester2/src/server/grpc/rpc_write.rs
index cb63ede31d..9ead77c4a3 100644
--- a/ingester2/src/server/grpc/rpc_write.rs
+++ b/ingester2/src/server/grpc/rpc_write.rs
@@ -226,12 +226,14 @@ mod tests {
column::{SemanticType, Values},
Column, DatabaseBatch, TableBatch,
};
+ use tokio::sync::Semaphore;
use super::*;
use crate::{dml_sink::mock_sink::MockDmlSink, persist::backpressure::CurrentState};
const NAMESPACE_ID: NamespaceId = NamespaceId::new(42);
const PARTITION_KEY: &str = "bananas";
+ const PERSIST_QUEUE_DEPTH: usize = 42;
macro_rules! test_rpc_write {
(
@@ -248,7 +250,11 @@ mod tests {
MockDmlSink::default().with_apply_return(vec![$sink_ret]),
);
let timestamp = Arc::new(TimestampOracle::new(0));
- let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Default::default());
+
+ let sem = Arc::new(Semaphore::new(PERSIST_QUEUE_DEPTH));
+ let persist_state = Arc::new(PersistState::new(PERSIST_QUEUE_DEPTH, sem));
+
+ let handler = RpcWrite::new(Arc::clone(&mock), timestamp, persist_state);
let ret = handler
.write(Request::new($request))
@@ -359,7 +365,11 @@ mod tests {
async fn test_rpc_write_ordered_timestamps() {
let mock = Arc::new(MockDmlSink::default().with_apply_return(vec![Ok(()), Ok(())]));
let timestamp = Arc::new(TimestampOracle::new(0));
- let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Default::default());
+
+ let sem = Arc::new(Semaphore::new(PERSIST_QUEUE_DEPTH));
+ let persist_state = Arc::new(PersistState::new(PERSIST_QUEUE_DEPTH, sem));
+
+ let handler = RpcWrite::new(Arc::clone(&mock), timestamp, persist_state);
let req = proto::WriteRequest {
payload: Some(DatabaseBatch {
@@ -413,7 +423,10 @@ mod tests {
async fn test_rpc_write_persist_saturation() {
let mock = Arc::new(MockDmlSink::default().with_apply_return(vec![Ok(()), Ok(())]));
let timestamp = Arc::new(TimestampOracle::new(0));
- let persist_state = Default::default();
+
+ let sem = Arc::new(Semaphore::new(PERSIST_QUEUE_DEPTH));
+ let persist_state = Arc::new(PersistState::new(PERSIST_QUEUE_DEPTH, sem));
+
let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Arc::clone(&persist_state));
let req = proto::WriteRequest {
diff --git a/iox_query/src/exec.rs b/iox_query/src/exec.rs
index 61d0fed395..7dc839db4d 100644
--- a/iox_query/src/exec.rs
+++ b/iox_query/src/exec.rs
@@ -130,7 +130,8 @@ impl Executor {
Self::new_with_config_and_executors(config, executors)
}
- /// Get testing executor.
+ /// Get testing executor that runs on a single thread with a low memory bound
+ /// to preserve resources.
pub fn new_testing() -> Self {
let config = ExecutorConfig {
num_threads: 1,
diff --git a/ioxd_ingester2/src/lib.rs b/ioxd_ingester2/src/lib.rs
index fd9be089de..adeea580a4 100644
--- a/ioxd_ingester2/src/lib.rs
+++ b/ioxd_ingester2/src/lib.rs
@@ -156,7 +156,7 @@ pub async fn create_ingester_server_type(
Duration::from_secs(ingester_config.wal_rotation_period_seconds),
exec,
ingester_config.persist_max_parallelism,
- ingester_config.persist_worker_queue_depth,
+ ingester_config.persist_queue_depth,
object_store,
)
.await?;
|
da3b20270bb84cf4536d6e23dde6aa94ad185122
|
Dom Dwyer
|
2023-08-22 12:58:21
|
doc link fixes
|
Fix bad links!
| null |
docs: doc link fixes
Fix bad links!
|
diff --git a/gossip_schema/src/handle.rs b/gossip_schema/src/handle.rs
index 064f8ab7c2..ec4983bca8 100644
--- a/gossip_schema/src/handle.rs
+++ b/gossip_schema/src/handle.rs
@@ -19,7 +19,7 @@ use tokio::{
/// transport limitations) and broadcasts the result to all listening peers.
///
/// Serialisation and processing of the [`Event`] given to the
-/// [`SchemaTx::Broadcast()`] method happen in a background actor task,
+/// [`SchemaTx::broadcast()`] method happen in a background actor task,
/// decoupling the caller from the latency of processing each frame. Dropping
/// the [`SchemaTx`] stops this background actor task.
#[derive(Debug)]
@@ -35,8 +35,8 @@ impl Drop for SchemaTx {
}
impl SchemaTx {
- /// Construct a new [`SchemaChangeObserver`] that publishes gossip messages
- /// over `gossip`, and delegates cache operations to `inner`.
+ /// Construct a new [`SchemaTx`] that publishes gossip messages over
+ /// `gossip`, and delegates cache operations to `inner`.
pub fn new(gossip: gossip::GossipHandle<Topic>) -> Self {
let (tx, rx) = mpsc::channel(100);
@@ -114,6 +114,8 @@ async fn actor_loop(mut rx: mpsc::Receiver<Event>, gossip: gossip::GossipHandle<
/// If any [`Column`] within the message is too large to fit into an update
/// containing only itself, then this method returns `false` indicating
/// oversized columns were dropped from the output.
+///
+/// [`Column`]: generated_types::influxdata::iox::gossip::v1::Column
fn serialise_table_update_frames(
mut msg: TableUpdated,
max_frame_bytes: usize,
|
ade21ad9a1e458c8c0bc6d90b9050c811f40c56f
|
Brandon Pfeifer
|
2022-12-13 11:00:50
|
restrict file permissions by default (#23959)
|
Most of these changes can be overridden by the system
maintainer with environment variables or systemd
override snippets.
| null |
fix: restrict file permissions by default (#23959)
Most of these changes can be overridden by the system
maintainer with environment variables or systemd
override snippets.
|
diff --git a/.circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service b/.circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service
index ee48e6ced4..abaf7ec31d 100644
--- a/.circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service
+++ b/.circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service
@@ -15,6 +15,11 @@ KillMode=control-group
Restart=on-failure
Type=forking
PIDFile=/var/lib/influxdb/influxd.pid
+StateDirectory=influxdb
+StateDirectoryMode=0750
+LogsDirectory=influxdb
+LogsDirectoryMode=0750
+UMask=0027
[Install]
WantedBy=multi-user.target
diff --git a/.circleci/package/fs/usr/lib/influxdb/scripts/init.sh b/.circleci/package/fs/usr/lib/influxdb/scripts/init.sh
index 2d3b32dac2..6f8e323a89 100644
--- a/.circleci/package/fs/usr/lib/influxdb/scripts/init.sh
+++ b/.circleci/package/fs/usr/lib/influxdb/scripts/init.sh
@@ -24,6 +24,13 @@ NAME=influxdb
USER=influxdb
GROUP=influxdb
+if [ -n "${INFLUXD_SERVICE_UMASK:-}" ]
+then
+ umask "${INFLUXD_SERVICE_UMASK}"
+else
+ umask 0027
+fi
+
# Check for sudo or root privileges before continuing
if [ "$UID" != "0" ]; then
echo "You must be root to run this script"
@@ -40,10 +47,11 @@ fi
# PID file for the daemon
PIDFILE=/var/run/influxdb/influxd.pid
-PIDDIR=`dirname $PIDFILE`
-if [ ! -d "$PIDDIR" ]; then
- mkdir -p $PIDDIR
- chown $USER:$GROUP $PIDDIR
+piddir="$(dirname "${PIDFILE}")"
+if [ ! -d "${piddir}" ]; then
+ mkdir -p "${piddir}"
+ chown "${USER}:${GROUP}" "${piddir}"
+ chmod 0750 "${piddir}"
fi
# Max open files
@@ -58,16 +66,20 @@ if [ -z "$STDOUT" ]; then
STDOUT=/var/log/influxdb/influxd.log
fi
-if [ ! -f "$STDOUT" ]; then
- mkdir -p $(dirname $STDOUT)
+outdir="$(dirname "${STDOUT}")"
+if [ ! -d "${outdir}" ]; then
+ mkdir -p "${outdir}"
+ chmod 0750 "${outdir}"
fi
if [ -z "$STDERR" ]; then
STDERR=/var/log/influxdb/influxd.log
fi
-if [ ! -f "$STDERR" ]; then
- mkdir -p $(dirname $STDERR)
+errdir="$(dirname "${STDERR}")"
+if [ ! -d "${errdir}" ]; then
+ mkdir -p "${errdir}"
+ chmod 0750 "${errdir}"
fi
# Override init script variables with DEFAULT values
|
1d64cb1b1e64f807ac1fddb00053c68ba5d63dfc
|
Phil Bracikowski
|
2023-04-14 16:24:50
|
delay initial s3 checker loop, fix dryrun
|
This PR makes 3 improvements.
* It adds the configured sleep interval at the start of the object store
checker to avoid issues with making a remote list immediately at
startup. We see issues with the s3 api.
* The --dry-run flag was stopping deletes of objects from the object store,
but the retention flagger was still making updates to the catalog.
These writes to the catalog are surprising when the --dry-run flag is
provided. Now, with --dry-run the catalog is not updated. The logging
instead says how many records would be updated because of retention.
* It decreases logging in should_delete of the checker as it will be
extremely noisy when reporting files it skips. An internal
environment has 3.8 million parquet files, most of which would be
skipped.
* related to #7363
* fixes influxdata/idpe#17451
| null |
fix(garbage_collector): delay initial s3 checker loop, fix dryrun
This PR makes 3 improvements.
* It adds the configured sleep interval at the start of the object store
checker to avoid issues with making a remote list immediately at
startup. We see issues with the S3 API.
* the --dry-run flag was stopping deletes of objects from the object store,
but the retention flagger was still making updates to the catalog.
These writes to the catalog are surprising when the --dry-run flag is
provided. Now, with --dry-run the catalog is not updated. The logging
instead says how many records would be updated because of retention.
* It decreases logging in should_delete of the checker as it will be
extremely noisy when reporting files it skips. An internal
environment has 3.8 million parquet files, most of which would be
skipped.
* related to #7363
* fixes influxdata/idpe#17451
|
diff --git a/garbage_collector/src/lib.rs b/garbage_collector/src/lib.rs
index 7dba67642e..484eec3edd 100644
--- a/garbage_collector/src/lib.rs
+++ b/garbage_collector/src/lib.rs
@@ -134,6 +134,7 @@ impl GarbageCollector {
shutdown.clone(),
catalog,
sub_config.retention_sleep_interval_minutes,
+ sub_config.dry_run,
));
Ok(Self {
@@ -262,7 +263,11 @@ mod tests {
async fn deletes_untracked_files_older_than_the_cutoff() {
let setup = OldFileSetup::new();
- let config = build_config(setup.data_dir_arg(), []).await;
+ let config = build_config(
+ setup.data_dir_arg(),
+ ["--objectstore-sleep-interval-minutes=0"],
+ )
+ .await;
tokio::spawn(async {
main(config).await.unwrap();
});
diff --git a/garbage_collector/src/objectstore/checker.rs b/garbage_collector/src/objectstore/checker.rs
index be4dd7e020..9531443194 100644
--- a/garbage_collector/src/objectstore/checker.rs
+++ b/garbage_collector/src/objectstore/checker.rs
@@ -51,7 +51,7 @@ async fn should_delete(
parquet_files: &mut dyn ParquetFileRepo,
) -> Result<bool> {
if cutoff < item.last_modified {
- info!(
+ debug!(
location = %item.location,
deleting = false,
reason = "too new",
@@ -74,7 +74,7 @@ async fn should_delete(
if parquet_file.is_some() {
// We have a reference to this file; do not delete
- info!(
+ debug!(
location = %item.location,
deleting = false,
reason = "exists in catalog",
@@ -82,7 +82,7 @@ async fn should_delete(
);
return Ok(false);
} else {
- info!(
+ debug!(
location = %item.location,
deleting = true,
reason = "not in catalog",
@@ -90,7 +90,7 @@ async fn should_delete(
);
}
} else {
- info!(
+ debug!(
location = %item.location,
deleting = true,
uuid,
@@ -99,7 +99,7 @@ async fn should_delete(
);
}
} else {
- info!(
+ debug!(
location = %item.location,
deleting = true,
file_name = %file_name.as_ref(),
diff --git a/garbage_collector/src/objectstore/lister.rs b/garbage_collector/src/objectstore/lister.rs
index 1d3f0907d1..90d3fe1d4e 100644
--- a/garbage_collector/src/objectstore/lister.rs
+++ b/garbage_collector/src/objectstore/lister.rs
@@ -12,8 +12,11 @@ pub(crate) async fn perform(
checker: mpsc::Sender<ObjectMeta>,
sleep_interval_minutes: u64,
) -> Result<()> {
+ // sleep poll interval to avoid issues with immediately polling the object store at startup
+ sleep(Duration::from_secs(60 * sleep_interval_minutes)).await;
let mut items = object_store.list(None).await.context(ListingSnafu)?;
+ info!("beginning lister loop");
loop {
select! {
_ = shutdown.cancelled() => {
@@ -29,6 +32,7 @@ pub(crate) async fn perform(
None => {
// sleep for the configured time, then list again and go around the loop
// again
+ debug!("no Object store item");
sleep(Duration::from_secs(60 * sleep_interval_minutes)).await;
items = object_store.list(None).await.context(ListingSnafu)?;
continue;
diff --git a/garbage_collector/src/retention/flagger.rs b/garbage_collector/src/retention/flagger.rs
index d3dee69e79..e07fb5e849 100644
--- a/garbage_collector/src/retention/flagger.rs
+++ b/garbage_collector/src/retention/flagger.rs
@@ -9,15 +9,27 @@ pub(crate) async fn perform(
shutdown: CancellationToken,
catalog: Arc<dyn Catalog>,
sleep_interval_minutes: u64,
+ dry_run: bool,
) -> Result<()> {
loop {
- let flagged = catalog
- .repositories()
- .await
- .parquet_files()
- .flag_for_delete_by_retention()
- .await
- .context(FlaggingSnafu)?;
+ let flagged = if !dry_run {
+ catalog
+ .repositories()
+ .await
+ .parquet_files()
+ .flag_for_delete_by_retention()
+ .await
+ .context(FlaggingSnafu)?
+ } else {
+ debug!("dry run enabled for parquet retention flagger");
+ catalog
+ .repositories()
+ .await
+ .parquet_files()
+ .flagged_for_delete_by_retention()
+ .await
+ .context(FlaggingSnafu)?
+ };
info!(flagged_count = %flagged.len(), "iox_catalog::flag_for_delete_by_retention()");
if flagged.is_empty() {
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index c7028dbcfd..d464a84ced 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -664,6 +664,9 @@ pub trait ParquetFileRepo: Send + Sync {
/// Flag all parquet files for deletion that are older than their namespace's retention period.
async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>>;
+ /// Get all parquet files flagged for deletion that are older than their namespace's retention period.
+ async fn flagged_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>>;
+
/// Get all parquet files for a shard with a max_sequence_number greater than the
/// one passed in. The ingester will use this on startup to see which files were persisted
/// that are greater than its min_unpersisted_number so that it can discard any data in
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index 69d3c702be..a52c1e08d7 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -1259,6 +1259,36 @@ impl ParquetFileRepo for MemTxn {
.collect())
}
+ async fn flagged_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>> {
+ let now = Timestamp::from(self.time_provider.now());
+ let stage = self.stage();
+
+ Ok(stage
+ .parquet_files
+ .iter_mut()
+ // don't flag if already flagged for deletion
+ .filter(|f| f.to_delete.is_none())
+ .filter_map(|f| {
+ // table retention, if it exists, overrides namespace retention
+ // TODO - include check of table retention period once implemented
+ stage
+ .namespaces
+ .iter()
+ .find(|n| n.id == f.namespace_id)
+ .and_then(|ns| {
+ ns.retention_period_ns.and_then(|rp| {
+ if f.max_time < now - rp {
+ // compare to flag_for_delete_by_retention which does an update here
+ Some(f.id)
+ } else {
+ None
+ }
+ })
+ })
+ })
+ .collect())
+ }
+
async fn list_by_shard_greater_than(
&mut self,
shard_id: ShardId,
diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs
index 5fcd887068..d5bfd5cf48 100644
--- a/iox_catalog/src/metrics.rs
+++ b/iox_catalog/src/metrics.rs
@@ -278,6 +278,7 @@ decorate!(
"parquet_create" = create( &mut self, parquet_file_params: ParquetFileParams) -> Result<ParquetFile>;
"parquet_flag_for_delete" = flag_for_delete(&mut self, id: ParquetFileId) -> Result<()>;
"parquet_flag_for_delete_by_retention" = flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>>;
+ "parquet_flagged_for_delete_by_retention" = flagged_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>>;
"parquet_list_by_shard_greater_than" = list_by_shard_greater_than(&mut self, shard_id: ShardId, sequence_number: SequenceNumber) -> Result<Vec<ParquetFile>>;
"parquet_list_by_namespace_not_to_delete" = list_by_namespace_not_to_delete(&mut self, namespace_id: NamespaceId) -> Result<Vec<ParquetFile>>;
"parquet_list_by_table_not_to_delete" = list_by_table_not_to_delete(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>>;
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 6693166be4..751cb0ef50 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -1856,6 +1856,28 @@ RETURNING *;
Ok(flagged)
}
+ async fn flagged_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>> {
+ let flagged_at = Timestamp::from(self.time_provider.now());
+ let flagged = sqlx::query(
+ r#"
+ SELECT
+ parquet_file.id
+ FROM namespace, parquet_file
+ WHERE namespace.retention_period_ns IS NOT NULL
+ AND parquet_file.to_delete IS NULL
+ AND parquet_file.max_time < $1 - namespace.retention_period_ns
+ AND namespace.id = parquet_file.namespace_id
+ "#,
+ )
+ .bind(flagged_at) // $1
+ .fetch_all(&mut self.inner)
+ .await
+ .map_err(|e| Error::SqlxError { source: e })?;
+
+ let flagged = flagged.into_iter().map(|row| row.get("id")).collect();
+ Ok(flagged)
+ }
+
async fn list_by_shard_greater_than(
&mut self,
shard_id: ShardId,
diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs
index 732b4d4fde..c657967a26 100644
--- a/iox_catalog/src/sqlite.rs
+++ b/iox_catalog/src/sqlite.rs
@@ -1730,6 +1730,29 @@ RETURNING *;
Ok(flagged)
}
+ async fn flagged_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>> {
+ let flagged_at = Timestamp::from(self.time_provider.now());
+ // TODO - include check of table retention period once implemented
+ let flagged = sqlx::query(
+ r#"
+ SELECT
+ parquet_file.id
+ FROM namespace, parquet_file
+ WHERE namespace.retention_period_ns IS NOT NULL
+ AND parquet_file.to_delete IS NULL
+ AND parquet_file.max_time < $1 - namespace.retention_period_ns
+ AND namespace.id = parquet_file.namespace_id
+ "#,
+ )
+ .bind(flagged_at) // $1
+ .fetch_all(self.inner.get_mut())
+ .await
+ .map_err(|e| Error::SqlxError { source: e })?;
+
+ let flagged = flagged.into_iter().map(|row| row.get("id")).collect();
+ Ok(flagged)
+ }
+
async fn list_by_shard_greater_than(
&mut self,
shard_id: ShardId,
|
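A minimal, self-contained sketch of the --dry-run branch described above. Repo is a hypothetical stand-in for the real ParquetFileRepo; only the two calls the flagger uses are modelled, and the ids are placeholders.

// Retention flagger dry-run branch, condensed from the diff above.
struct Repo {
    eligible: Vec<u64>, // file ids past their namespace retention period
    flagged: Vec<u64>,  // ids actually marked to_delete in the catalog
}

impl Repo {
    // Mutating path: marks the eligible files and returns their ids.
    fn flag_for_delete_by_retention(&mut self) -> Vec<u64> {
        self.flagged.extend(self.eligible.iter().copied());
        std::mem::take(&mut self.eligible)
    }
    // Read-only path used by --dry-run: report only, no catalog writes.
    fn flagged_for_delete_by_retention(&self) -> Vec<u64> {
        self.eligible.clone()
    }
}

fn run_flagger(repo: &mut Repo, dry_run: bool) -> usize {
    let flagged = if dry_run {
        repo.flagged_for_delete_by_retention()
    } else {
        repo.flag_for_delete_by_retention()
    };
    // The real flagger logs this count either way.
    flagged.len()
}

fn main() {
    let mut repo = Repo { eligible: vec![1, 2, 3], flagged: vec![] };
    assert_eq!(run_flagger(&mut repo, true), 3);  // dry run: nothing written
    assert!(repo.flagged.is_empty());
    assert_eq!(run_flagger(&mut repo, false), 3); // real run: catalog updated
    assert_eq!(repo.flagged, vec![1, 2, 3]);
}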
e76b1073322b9da5bd66388cd26be2268b69af4e
|
Dom Dwyer
|
2022-12-13 14:36:18
|
persist back-pressure
|
This commit causes an ingester2 instance to stop accepting new writes
when at least one persist queue is full. Writes continue to be rejected
until the persist workers have processed enough outstanding persist
tasks to drain the queues to half of their capacity, at which point
writes are accepted again.
When a write is rejected, the ingester returns a "resource exhausted"
RPC code to the caller.
Checking if the system is in a healthy state for writes is extremely
cheap, as it is on the hot path for all writes.
| null |
feat(ingester2): persist back-pressure
This commit causes an ingester2 instance to stop accepting new writes
when at least one persist queue is full. Writes continue to be rejected
until the persist workers have processed enough outstanding persist
tasks to drain the queues to half of their capacity, at which point
writes are accepted again.
When a write is rejected, the ingester returns a "resource exhausted"
RPC code to the caller.
Checking if the system is in a healthy state for writes is extremely
cheap, as it is on the hot path for all writes.
|
diff --git a/ingester2/src/init.rs b/ingester2/src/init.rs
index 23b3d443bd..9bb01621af 100644
--- a/ingester2/src/init.rs
+++ b/ingester2/src/init.rs
@@ -241,7 +241,7 @@ pub async fn new(
// Spawn the persist workers to compact partition data, convert it into
// Parquet files, and upload them to object storage.
- let persist_handle = PersistHandle::new(
+ let (persist_handle, persist_state) = PersistHandle::new(
persist_workers,
persist_worker_queue_depth,
persist_executor,
@@ -273,7 +273,14 @@ pub async fn new(
));
Ok(IngesterGuard {
- rpc: GrpcDelegate::new(Arc::new(write_path), buffer, timestamp, catalog, metrics),
+ rpc: GrpcDelegate::new(
+ Arc::new(write_path),
+ buffer,
+ timestamp,
+ persist_state,
+ catalog,
+ metrics,
+ ),
rotation_task: handle,
})
}
diff --git a/ingester2/src/init/wal_replay.rs b/ingester2/src/init/wal_replay.rs
index e826ac2280..a049ca306f 100644
--- a/ingester2/src/init/wal_replay.rs
+++ b/ingester2/src/init/wal_replay.rs
@@ -95,7 +95,7 @@ where
"dropping empty wal segment",
);
- // TODO(dom:test): empty WAL replay
+ // TODO(test): empty WAL replay
// A failure to delete an empty file should not prevent WAL
// replay from continuing.
diff --git a/ingester2/src/persist/backpressure.rs b/ingester2/src/persist/backpressure.rs
new file mode 100644
index 0000000000..a996d9e905
--- /dev/null
+++ b/ingester2/src/persist/backpressure.rs
@@ -0,0 +1,521 @@
+use std::{
+ sync::{
+ atomic::{AtomicUsize, Ordering},
+ Arc,
+ },
+ time::Duration,
+};
+
+use crossbeam_utils::CachePadded;
+use observability_deps::tracing::*;
+use parking_lot::Mutex;
+use tokio::{
+ sync::mpsc,
+ task::JoinHandle,
+ time::{Interval, MissedTickBehavior},
+};
+
+/// The interval of time between evaluations of the state of the persist system
+/// when [`CurrentState::Saturated`].
+const EVALUATE_SATURATION_INTERVAL: Duration = Duration::from_secs(1);
+
+/// A state of the persist system.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum CurrentState {
+ /// The system is operating normally.
+ Ok,
+ /// The persist system is overloaded.
+ Saturated,
+}
+
+/// A handle to read (and set, within the persist module) the state of the
+/// persist system.
+///
+/// Clone operations are cheap, and state read operations are very cheap.
+///
+/// # Saturation Recovery
+///
+/// Once the persist system is marked as [`CurrentState::Saturated`], it remains
+/// in that state until the following conditions are satisfied:
+///
+/// * There are no outstanding enqueue operations (no thread is blocked adding
+/// an item to any work queue).
+///
+/// * All queues have at least half of their capacity free (being at most,
+/// half full).
+///
+/// These conditions are evaluated periodically, at the interval specified in
+/// [`EVALUATE_SATURATION_INTERVAL`].
+#[derive(Debug)]
+pub(crate) struct PersistState {
+ /// The actual state value.
+ ///
+ /// The value of this variable is set to the [`CurrentState`] discriminant
+ /// for the respective state that was [`PersistState::set()`] in it.
+ ///
+ /// This is cache padded due to the high read volume, preventing any
+ /// unfortunate false-sharing of cache lines from impacting the hot-path
+ /// reads.
+ state: CachePadded<AtomicUsize>,
+
+ /// Tracks the number of async tasks waiting within
+ /// [`PersistHandle::queue_persist()`], asynchronously blocking to enqueue a
+ /// persist job.
+ ///
+ /// This is modified using [`Ordering::SeqCst`] as performance is not a
+ /// priority for code paths that modify it.
+ ///
+ /// [`PersistHandle::queue_persist()`]:
+ /// super::handle::PersistHandle::queue_persist()
+ waiting_to_enqueue: Arc<AtomicUsize>,
+
+ /// The handle to the current saturation evaluation/recovery task, if any.
+ recovery_handle: Mutex<Option<JoinHandle<()>>>,
+}
+
+/// Initialise a [`PersistState`] with [`CurrentState::Ok`].
+impl Default for PersistState {
+ fn default() -> Self {
+ let s = Self {
+ state: Default::default(),
+ waiting_to_enqueue: Arc::new(AtomicUsize::new(0)),
+ recovery_handle: Default::default(),
+ };
+ s.set(CurrentState::Ok);
+ s
+ }
+}
+
+impl PersistState {
+ /// Set the reported state of the [`PersistState`].
+ fn set(&self, s: CurrentState) -> bool {
+ // Set the new state, retaining the most recent state.
+ //
+ // SeqCst is absolute overkill, but is used here due to the strong
+ // ordering guarantees providing minimal risk of bugs. The low volume of
+ // writes to this variable means the overhead is more than acceptable.
+ let last = self.state.swap(s as usize, Ordering::SeqCst);
+
+ // If "s" does not match the old state, this is the first thread to
+ // switch the state from "last", to "s", since setting it to "last".
+ //
+ // Subsequent calls setting the state to "s" will return false, until a
+ // different state is set.
+ s as usize != last
+ }
+
+ /// Get the current reported state of the [`PersistState`].
+ ///
+ /// Reading this value is extremely cheap and can be done without
+ /// performance concern.
+ ///
+ /// This value is eventually consistent, with a presumption of
+ pub(crate) fn get(&self) -> CurrentState {
+ // Correctness: relaxed as reading the current state is allowed to be
+ // racy for performance reasons; this call should be as cheap as
+ // possible due to it being squarely in the hot path.
+ //
+ // Any value change will "eventually" be made visible to all threads, at
+ // which point this read converges to the latest value. A potential
+ // extra write or two arriving before this value is visible to all
+ // threads is acceptable in the "saturated" cold path, prioritising
+ // latency of the hot path.
+ match self.state.load(Ordering::Relaxed) {
+ v if v == CurrentState::Ok as usize => CurrentState::Ok,
+ v if v == CurrentState::Saturated as usize => CurrentState::Saturated,
+ _ => unreachable!(),
+ }
+ }
+
+ /// A convenience method that returns true if `self` is
+ /// [`CurrentState::Saturated`].
+ pub(crate) fn is_saturated(&self) -> bool {
+ self.get() == CurrentState::Saturated
+ }
+
+ /// Mark the persist system as saturated, returning a [`WaitGuard`] that
+ /// MUST be held during any subsequent async-blocking enqueue request
+ /// ([`mpsc::Sender::send()`] and the like).
+ ///
+ /// Holding the guard over the `send()` await allows the saturation
+ /// evaluation to track the number of threads with an ongoing enqueue wait.
+ pub(super) fn set_saturated<T>(s: Arc<Self>, persist_queues: Vec<mpsc::Sender<T>>) -> WaitGuard
+ where
+ T: Send + 'static,
+ {
+ // Increment the number of tasks waiting to push into a queue.
+ //
+ // INVARIANT: this increment MUST happen-before returning the guard, and
+ // waiting on the queue send(), and before starting the saturation
+ // monitor task so that it observes this waiter.
+ let _ = s.waiting_to_enqueue.fetch_add(1, Ordering::SeqCst);
+
+ // Attempt to set the system to "saturated".
+ let first = s.set(CurrentState::Saturated);
+ if first {
+ // This is the first thread to mark the system as saturated.
+ warn!("persist queues saturated, blocking ingest");
+
+ // Always check the state of the system EVALUATE_SATURATION_INTERVAL
+ // duration of time after the last completed evaluation - do not
+ // attempt to check continuously should the check fall behind the
+ // ticker.
+ let mut interval = tokio::time::interval(EVALUATE_SATURATION_INTERVAL);
+ interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+ // Spawn a task that marks the system as not saturated after the queues
+ // have processed some of the backlog.
+ let h = tokio::spawn(saturation_monitor_task(
+ interval,
+ Arc::clone(&s),
+ persist_queues,
+ ));
+ // Retain the task handle to avoid leaking it if dropped.
+ *s.recovery_handle.lock() = Some(h);
+ }
+
+ WaitGuard(Arc::clone(&s.waiting_to_enqueue))
+ }
+
+ /// A test-only helper that sets the state of `self` only. It does not spawn
+ /// a recovery task.
+ #[cfg(test)]
+ pub(crate) fn test_set_state(&self, s: CurrentState) {
+ self.set(s);
+ }
+}
+
+impl Drop for PersistState {
+ fn drop(&mut self) {
+ if let Some(h) = self.recovery_handle.lock().as_ref() {
+ h.abort();
+ }
+ }
+}
+
+/// A guard that decrements the number of writers waiting to enqueue an item
+/// into the persistence queue when dropped.
+///
+/// This MUST be held whilst calling [`mpsc::Sender::send()`].
+#[must_use = "must hold wait guard while waiting for enqueue"]
+pub(super) struct WaitGuard(Arc<AtomicUsize>);
+
+impl Drop for WaitGuard {
+ fn drop(&mut self) {
+ let _ = self.0.fetch_sub(1, Ordering::SeqCst);
+ }
+}
+
+/// A task that monitors the `waiters` and `queues` to determine when the
+/// persist system is no longer saturated.
+///
+/// Once the system is no longer saturated (as determined according to the
+/// documentation for [`PersistState`]), the [`PersistState`] is set to
+/// [`CurrentState::Ok`].
+async fn saturation_monitor_task<T>(
+ mut interval: Interval,
+ state: Arc<PersistState>,
+ queues: Vec<mpsc::Sender<T>>,
+) where
+ T: Send,
+{
+ loop {
+ // Wait before evaluating the state of the system.
+ interval.tick().await;
+
+ // INVARIANT: this task only ever runs when the system is saturated.
+ assert!(state.is_saturated());
+
+ // First check if any tasks are waiting to enqueue an item (an
+ // indication that one or more queues is full).
+ let n_waiting = state.waiting_to_enqueue.load(Ordering::SeqCst);
+ if n_waiting > 0 {
+ debug!(
+ n_waiting,
+ "waiting for outstanding persist jobs to be enqueued"
+ );
+ continue;
+ }
+
+ // No async task WAS currently waiting to enqueue a persist job when
+ // checking above, but one may want to immediately enqueue one now (or
+ // later).
+ //
+ // In order to minimise health flip-flopping, only mark the persist
+ // system as healthy once there is some capacity in the queues to accept
+ // new persist jobs. This avoids a queue having 1 slot free, only to be
+ // immediately filled and the system pause again.
+ //
+ // This check below ensures that all queues are at least half empty
+ // before marking the system as recovered.
+ let n_queues = queues
+ .iter()
+ .filter(|q| !has_sufficient_capacity(q.capacity(), q.max_capacity()))
+ .count();
+ if n_queues != 0 {
+ debug!(n_queues, "waiting for queues to drain");
+ continue;
+ }
+
+ // There are no outstanding enqueue waiters, and all queues are at half
+ // capacity or better.
+ info!("persist queue saturation reduced, resuming ingest");
+
+ // INVARIANT: there is only ever one task that monitors the queue state
+ // and transitions the persist state to OK, therefore this task is
+ // always the first to set the state to OK.
+ assert!(state.set(CurrentState::Ok));
+
+ // The task MUST immediately stop so any subsequent saturation is
+ // handled by the newly spawned task, upholding the above invariant.
+ return;
+ }
+}
+
+/// Returns true if `capacity` is sufficient to be considered ready for more
+/// requests to be enqueued.
+fn has_sufficient_capacity(capacity: usize, max_capacity: usize) -> bool {
+ // Did this fire? You have your arguments the wrong way around.
+ assert!(capacity <= max_capacity);
+
+ let want_at_least = (max_capacity + 1) / 2;
+ trace!(
+ available = capacity,
+ max = max_capacity,
+ want_at_least,
+ "evaluating queue backlog"
+ );
+
+ capacity >= want_at_least
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use test_helpers::timeout::FutureTimeout;
+
+ use super::*;
+
+ const POLL_INTERVAL: Duration = Duration::from_millis(5);
+
+ #[test]
+ fn test_has_sufficient_capacity() {
+ // A queue of minimal depth (1).
+ //
+ // Validates there are no off-by-one errors.
+ assert!(!has_sufficient_capacity(0, 1));
+ assert!(has_sufficient_capacity(1, 1));
+
+ // Even queues
+ assert!(!has_sufficient_capacity(0, 2));
+ assert!(has_sufficient_capacity(1, 2));
+ assert!(has_sufficient_capacity(2, 2));
+
+ // Odd queues
+ assert!(!has_sufficient_capacity(0, 3));
+ assert!(!has_sufficient_capacity(1, 3));
+ assert!(has_sufficient_capacity(2, 3));
+ assert!(has_sufficient_capacity(3, 3));
+ }
+
+ /// Validate the state setters and getters are correct, and that only the
+ /// first thread that changes the state observes the "first=true" response.
+ #[test]
+ fn test_state_transitions() {
+ let s = PersistState::default();
+ assert_eq!(s.get(), CurrentState::Ok);
+ assert!(!s.is_saturated());
+
+ assert!(!s.set(CurrentState::Ok)); // Already OK
+ assert_eq!(s.get(), CurrentState::Ok);
+ assert!(!s.is_saturated());
+
+ assert!(!s.set(CurrentState::Ok)); // Already OK
+ assert_eq!(s.get(), CurrentState::Ok);
+ assert!(!s.is_saturated());
+
+ assert!(s.set(CurrentState::Saturated)); // First to change
+ assert!(s.is_saturated());
+ assert!(!s.set(CurrentState::Saturated)); // Not first
+ assert!(s.is_saturated());
+ assert_eq!(s.get(), CurrentState::Saturated);
+ assert!(s.is_saturated());
+
+ assert!(!s.set(CurrentState::Saturated)); // Not first
+ assert_eq!(s.get(), CurrentState::Saturated);
+ assert!(s.is_saturated());
+
+ assert!(s.set(CurrentState::Ok)); // First to change
+ assert_eq!(s.get(), CurrentState::Ok);
+ assert!(!s.is_saturated());
+ }
+
+ /// Ensure that the saturation evaluation checks for outstanding enqueue
+ /// waiters (as tracked by the [`WaitGuard`]).
+ #[tokio::test]
+ async fn test_saturation_recovery_enqueue_waiters() {
+ let s = Arc::new(PersistState::default());
+
+ // Use no queues to ensure only the waiters are blocking recovery.
+
+ assert!(!s.is_saturated());
+
+ let w1 = PersistState::set_saturated::<()>(Arc::clone(&s), vec![]);
+ let w2 = PersistState::set_saturated::<()>(Arc::clone(&s), vec![]);
+
+ assert!(s.is_saturated());
+
+ // Kill the actual recovery task (there must be one running at this
+ // point).
+ s.recovery_handle.lock().take().unwrap().abort();
+
+ // Spawn a replacement that ticks way more often to speed up the test.
+ let h = tokio::spawn(saturation_monitor_task::<()>(
+ tokio::time::interval(POLL_INTERVAL),
+ Arc::clone(&s),
+ vec![],
+ ));
+
+ // Drop a waiter and ensure the system is still saturated.
+ drop(w1);
+ assert!(s.is_saturated());
+
+ // Sleep a little to ensure it remains saturated with 1 outstanding
+ // waiter.
+ //
+ // This is false-negative racy - if this assert fires, there is a
+ // legitimate problem - one outstanding waiter should prevent the system
+ // from ever transitioning to a healthy state.
+ tokio::time::sleep(POLL_INTERVAL * 4).await;
+ assert!(s.is_saturated());
+
+ // Drop the other waiter.
+ drop(w2);
+
+ // Wait up to 5 seconds to observe the system recovery.
+ async {
+ loop {
+ if !s.is_saturated() {
+ return;
+ }
+ tokio::time::sleep(POLL_INTERVAL).await;
+ }
+ }
+ .with_timeout_panic(Duration::from_secs(5))
+ .await;
+
+ // Wait up to 60 seconds to observe the recovery task finish.
+ //
+ // The recovery task sets the system state as healthy, and THEN exits,
+ // so there exists a window of time where the system has passed the
+ // saturation check above, but the recovery task MAY still be running.
+ //
+ // By waiting an excessive duration of time, we ensure the task does
+ // indeed finish.
+ async {
+ loop {
+ if h.is_finished() {
+ return;
+ }
+ tokio::time::sleep(POLL_INTERVAL).await;
+ }
+ }
+ .with_timeout_panic(Duration::from_secs(60))
+ .await;
+
+ // No task panic occurred.
+ assert!(h.with_timeout_panic(Duration::from_secs(5)).await.is_ok());
+ assert!(!s.is_saturated());
+ }
+
+ /// Ensure that the saturation evaluation checks for free queue slots before
+ /// marking the system as healthy.
+ #[tokio::test]
+ async fn test_saturation_recovery_queue_capacity() {
+ let s = Arc::new(PersistState::default());
+
+ async fn fill(q: &mpsc::Sender<()>, times: usize) {
+ for _ in 0..times {
+ q.send(()).await.unwrap();
+ }
+ }
+
+ // Use no waiters to ensure only the queue slots are blocking recovery.
+
+ let (tx1, mut rx1) = mpsc::channel(5);
+ let (tx2, mut rx2) = mpsc::channel(5);
+
+ // Place some items in the queues
+ fill(&tx1, 3).await; // Over the threshold of 5/2 = 2.5, rounded down to 2.
+ fill(&tx2, 3).await; // Over the threshold of 5/2 = 2.5, rounded down to 2.
+
+ assert!(!s.is_saturated());
+ assert!(s.set(CurrentState::Saturated));
+ assert!(s.is_saturated());
+
+ // Spawn the recovery task directly, not via set_saturated() for
+ // simplicity - the test above asserts the task is started by a call to
+ // set_saturated().
+ let h = tokio::spawn(saturation_monitor_task::<()>(
+ tokio::time::interval(POLL_INTERVAL),
+ Arc::clone(&s),
+ vec![tx1, tx2],
+ ));
+
+ // Wait a little and ensure the state hasn't changed.
+ //
+ // While this could be a false negative, if this assert fires there is a
+ // legitimate problem.
+ tokio::time::sleep(POLL_INTERVAL * 4).await;
+ assert!(s.is_saturated());
+
+ // Drain one of the queues to below the saturation point.
+ rx1.recv().await.expect("no recovery task running");
+
+ // Wait a little and ensure the state still hasn't changed.
+ //
+ // While this could also be a false negative, if this assert fires there
+ // is a legitimate problem.
+ tokio::time::sleep(POLL_INTERVAL * 4).await;
+ assert!(s.is_saturated());
+
+ // Drain the remaining queue below the threshold for recovery.
+ rx2.recv().await.expect("no recovery task running");
+
+ // Wait up to 5 seconds to observe the system recovery.
+ async {
+ loop {
+ if !s.is_saturated() {
+ return;
+ }
+ tokio::time::sleep(POLL_INTERVAL).await;
+ }
+ }
+ .with_timeout_panic(Duration::from_secs(5))
+ .await;
+
+ // Wait up to 60 seconds to observe the recovery task finish.
+ //
+ // The recovery task sets the system state as healthy, and THEN exits,
+ // so there exists a window of time where the system has passed the
+ // saturation check above, but the recovery task MAY still be running.
+ //
+ // By waiting an excessive duration of time, we ensure the task does
+ // indeed finish.
+ async {
+ loop {
+ if h.is_finished() {
+ return;
+ }
+ tokio::time::sleep(POLL_INTERVAL).await;
+ }
+ }
+ .with_timeout_panic(Duration::from_secs(60))
+ .await;
+
+ // No task panic occurred.
+ assert!(h.with_timeout_panic(Duration::from_secs(5)).await.is_ok());
+ assert!(!s.is_saturated());
+ }
+}
diff --git a/ingester2/src/persist/handle.rs b/ingester2/src/persist/handle.rs
index eb993e0dac..bc49f0caf2 100644
--- a/ingester2/src/persist/handle.rs
+++ b/ingester2/src/persist/handle.rs
@@ -2,22 +2,21 @@ use std::sync::Arc;
use iox_catalog::interface::Catalog;
use iox_query::exec::Executor;
-use observability_deps::tracing::{debug, info};
+use observability_deps::tracing::*;
use parking_lot::Mutex;
use parquet_file::storage::ParquetStorage;
use sharder::JumpHash;
-use thiserror::Error;
-use tokio::sync::{mpsc, oneshot};
+use tokio::sync::{
+ mpsc::{self, error::TrySendError},
+ oneshot,
+};
use crate::buffer_tree::partition::{persisting::PersistingData, PartitionData};
-use super::context::{Context, PersistRequest};
-
-#[derive(Debug, Error)]
-pub(crate) enum PersistError {
- #[error("persist queue is full")]
- QueueFull,
-}
+use super::{
+ backpressure::PersistState,
+ context::{Context, PersistRequest},
+};
/// A persistence task submission handle.
///
@@ -90,6 +89,28 @@ pub(crate) enum PersistError {
/// always placed in the same worker queue, ensuring they execute sequentially.
///
/// [`SortKey`]: schema::sort::SortKey
+///
+/// # Overload & Back-pressure
+///
+/// The persist queue is bounded, but the caller must prevent new persist jobs
+/// from being generated and blocked whilst waiting to add the persist job to
+/// the bounded queue, otherwise the system is effectively unbounded. If an
+/// unbounded number of threads block on [`PersistHandle::queue_persist()`]
+/// waiting to successfully enqueue the job, then there is no bound on
+/// outstanding persist jobs at all.
+///
+/// To prevent this, the persistence system exposes an indicator of saturation
+/// (readable via the [`PersistState`]) that the caller MUST use to prevent the
+/// generation of new persist tasks (for example, by blocking any further
+/// ingest) on a best-effort basis.
+///
+/// When the persist queue is saturated, the [`PersistState::is_saturated()`]
+/// returns true. Once the backlog of persist jobs is reduced, the
+/// [`PersistState`] is switched back to a healthy state and new persist jobs
+/// may be generated as normal.
+///
+/// For details of the exact saturation detection & recovery logic, see
+/// [`PersistState`].
#[derive(Debug, Clone)]
pub(crate) struct PersistHandle {
/// THe state/dependencies shared across all worker tasks.
@@ -105,6 +126,9 @@ pub(crate) struct PersistHandle {
/// Task handles for the worker tasks, aborted on drop of all
/// [`PersistHandle`] instances.
tasks: Arc<Vec<AbortOnDrop<()>>>,
+
+ /// Records the saturation state of the persist system.
+ persist_state: Arc<PersistState>,
}
impl PersistHandle {
@@ -115,7 +139,7 @@ impl PersistHandle {
exec: Arc<Executor>,
store: ParquetStorage,
catalog: Arc<dyn Catalog>,
- ) -> Self {
+ ) -> (Self, Arc<PersistState>) {
assert_ne!(n_workers, 0, "must run at least 1 persist worker");
assert_ne!(worker_queue_depth, 0, "worker queue depth must be non-zero");
@@ -143,11 +167,18 @@ impl PersistHandle {
assert!(!tasks.is_empty());
- Self {
- inner,
- persist_queues: Arc::new(JumpHash::new(tx_handles)),
- tasks: Arc::new(tasks),
- }
+ // Initialise the saturation state as "not saturated".
+ let persist_state = Default::default();
+
+ (
+ Self {
+ inner,
+ persist_queues: Arc::new(JumpHash::new(tx_handles)),
+ tasks: Arc::new(tasks),
+ persist_state: Arc::clone(&persist_state),
+ },
+ persist_state,
+ )
}
/// Place `data` from `partition` into the persistence queue.
@@ -159,12 +190,12 @@ impl PersistHandle {
/// key will be updated, and [`PartitionData::mark_persisted()`] is called
/// with `data`.
///
- /// Once all persistence related tasks are complete, the returned channel
- /// publishes a notification.
+ /// Once all persistence related tasks for `data` are complete, the returned
+ /// channel publishes a notification.
///
/// # Panics
///
- /// Panics if one or more worker threads have stopped.
+ /// Panics if the assigned persist worker task has stopped.
///
/// Panics (asynchronously) if the [`PartitionData`]'s sort key is updated
/// between persistence starting and ending.
@@ -180,14 +211,40 @@ impl PersistHandle {
"enqueuing persistence task"
);
- // Build the persist task request
+ // Build the persist task request.
let (r, notify) = PersistRequest::new(partition, data);
- self.persist_queues
- .hash(r.partition_id())
- .send(r)
- .await
- .expect("persist worker has stopped");
+ // Select a worker to dispatch this request to.
+ let queue = self.persist_queues.hash(r.partition_id());
+
+ // Try and enqueue the persist task immediately.
+ match queue.try_send(r) {
+ Ok(()) => {} // Success!
+ Err(TrySendError::Closed(_)) => panic!("persist worker has stopped"),
+ Err(TrySendError::Full(r)) => {
+ // The worker's queue is full. Mark the persist system as being
+ // saturated, requiring some time to clear outstanding persist
+ // operations.
+ //
+ // The returned guard MUST be held during the send() await
+ // below.
+ let _guard = PersistState::set_saturated(
+ Arc::clone(&self.persist_state),
+ self.persist_queues.shards().to_owned(),
+ );
+
+ // TODO(test): the guard is held over the await point below
+
+ // Park this task waiting to enqueue the persist whilst holding
+ // the guard above.
+ //
+ // If this send() is aborted, the guard is dropped and the
+ // number of waiters is decremented. If the send() is
+ // successful, the guard is dropped immediately when leaving
+ // this scope.
+ queue.send(r).await.expect("persist worker stopped");
+ }
+ };
notify
}
diff --git a/ingester2/src/persist/mod.rs b/ingester2/src/persist/mod.rs
index 1ae9092259..a5b0c433ae 100644
--- a/ingester2/src/persist/mod.rs
+++ b/ingester2/src/persist/mod.rs
@@ -1,3 +1,4 @@
+pub(crate) mod backpressure;
pub(super) mod compact;
mod context;
pub(crate) mod handle;
diff --git a/ingester2/src/server/grpc.rs b/ingester2/src/server/grpc.rs
index 0f80fbfd99..96b3dc9dc0 100644
--- a/ingester2/src/server/grpc.rs
+++ b/ingester2/src/server/grpc.rs
@@ -16,6 +16,7 @@ use service_grpc_catalog::CatalogService;
use crate::{
dml_sink::DmlSink,
init::IngesterRpcInterface,
+ persist::backpressure::PersistState,
query::{response::QueryResponse, QueryExec},
timestamp_oracle::TimestampOracle,
};
@@ -32,6 +33,7 @@ pub(crate) struct GrpcDelegate<D, Q> {
dml_sink: Arc<D>,
query_exec: Arc<Q>,
timestamp: Arc<TimestampOracle>,
+ persist_state: Arc<PersistState>,
catalog: Arc<dyn Catalog>,
metrics: Arc<metric::Registry>,
}
@@ -46,6 +48,7 @@ where
dml_sink: Arc<D>,
query_exec: Arc<Q>,
timestamp: Arc<TimestampOracle>,
+ persist_state: Arc<PersistState>,
catalog: Arc<dyn Catalog>,
metrics: Arc<metric::Registry>,
) -> Self {
@@ -53,6 +56,7 @@ where
dml_sink,
query_exec,
timestamp,
+ persist_state,
catalog,
metrics,
}
@@ -84,6 +88,7 @@ where
WriteServiceServer::new(RpcWrite::new(
Arc::clone(&self.dml_sink),
Arc::clone(&self.timestamp),
+ Arc::clone(&self.persist_state),
))
}
diff --git a/ingester2/src/server/grpc/rpc_write.rs b/ingester2/src/server/grpc/rpc_write.rs
index fa8531dc8b..cb63ede31d 100644
--- a/ingester2/src/server/grpc/rpc_write.rs
+++ b/ingester2/src/server/grpc/rpc_write.rs
@@ -9,10 +9,11 @@ use mutable_batch::writer;
use mutable_batch_pb::decode::decode_database_batch;
use observability_deps::tracing::*;
use thiserror::Error;
-use tonic::{Request, Response};
+use tonic::{Code, Request, Response};
use crate::{
dml_sink::{DmlError, DmlSink},
+ persist::backpressure::PersistState,
timestamp_oracle::TimestampOracle,
TRANSITION_SHARD_INDEX,
};
@@ -36,15 +37,25 @@ enum RpcError {
/// The serialised write payload could not be read.
#[error(transparent)]
Decode(mutable_batch_pb::decode::Error),
+
+ /// The ingester's [`PersistState`] is marked as
+ /// [`CurrentState::Saturated`]. See [`PersistHandle`] for documentation.
+ ///
+ /// [`PersistHandle`]: crate::persist::handle::PersistHandle
+ /// [`CurrentState::Saturated`]:
+ /// crate::persist::backpressure::CurrentState::Saturated
+ #[error("ingester overloaded")]
+ PersistSaturated,
}
impl From<RpcError> for tonic::Status {
fn from(e: RpcError) -> Self {
- match e {
- RpcError::Decode(_) | RpcError::NoPayload | RpcError::NoTables => {
- Self::invalid_argument(e.to_string())
- }
- }
+ let code = match e {
+ RpcError::Decode(_) | RpcError::NoPayload | RpcError::NoTables => Code::InvalidArgument,
+ RpcError::PersistSaturated => Code::ResourceExhausted,
+ };
+
+ Self::new(code, e.to_string())
}
}
@@ -94,14 +105,22 @@ fn map_write_error(e: mutable_batch::Error) -> tonic::Status {
pub(crate) struct RpcWrite<T> {
sink: T,
timestamp: Arc<TimestampOracle>,
+ persist_state: Arc<PersistState>,
}
impl<T> RpcWrite<T> {
/// Instantiate a new [`RpcWrite`] that pushes [`DmlOperation`] instances
/// into `sink`.
- #[allow(dead_code)]
- pub(crate) fn new(sink: T, timestamp: Arc<TimestampOracle>) -> Self {
- Self { sink, timestamp }
+ pub(crate) fn new(
+ sink: T,
+ timestamp: Arc<TimestampOracle>,
+ persist_state: Arc<PersistState>,
+ ) -> Self {
+ Self {
+ sink,
+ timestamp,
+ persist_state,
+ }
}
}
@@ -115,6 +134,27 @@ where
&self,
request: Request<proto::WriteRequest>,
) -> Result<Response<proto::WriteResponse>, tonic::Status> {
+ // Drop writes if the persistence is saturated.
+ //
+ // This gives the ingester a chance to reduce the backlog of persistence
+ // tasks, which in turn reduces the memory usage of the ingester. If
+ // ingest was to continue unabated, an OOM would be inevitable.
+ //
+ // If you're seeing these error responses in RPC requests, you need to
+ // either:
+ //
+ // * Increase the persist queue depth if there is a decent headroom of
+ // unused RAM allocated to the ingester.
+ // * Increase the RAM allocation, and increase the persist queue
+ // depth proportionally.
+ // * Deploy more ingesters to reduce the request load on any single
+ // ingester.
+ //
+ if self.persist_state.is_saturated() {
+ return Err(RpcError::PersistSaturated)?;
+ }
+
+ // Extract the remote address for debugging.
let remote_addr = request
.remote_addr()
.map(|v| v.to_string())
@@ -188,7 +228,7 @@ mod tests {
};
use super::*;
- use crate::dml_sink::mock_sink::MockDmlSink;
+ use crate::{dml_sink::mock_sink::MockDmlSink, persist::backpressure::CurrentState};
const NAMESPACE_ID: NamespaceId = NamespaceId::new(42);
const PARTITION_KEY: &str = "bananas";
@@ -208,7 +248,7 @@ mod tests {
MockDmlSink::default().with_apply_return(vec![$sink_ret]),
);
let timestamp = Arc::new(TimestampOracle::new(0));
- let handler = RpcWrite::new(Arc::clone(&mock), timestamp);
+ let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Default::default());
let ret = handler
.write(Request::new($request))
@@ -319,7 +359,7 @@ mod tests {
async fn test_rpc_write_ordered_timestamps() {
let mock = Arc::new(MockDmlSink::default().with_apply_return(vec![Ok(()), Ok(())]));
let timestamp = Arc::new(TimestampOracle::new(0));
- let handler = RpcWrite::new(Arc::clone(&mock), timestamp);
+ let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Default::default());
let req = proto::WriteRequest {
payload: Some(DatabaseBatch {
@@ -366,4 +406,58 @@ mod tests {
}
);
}
+
+ /// Validate that the persist system being marked as saturated prevents the
+ /// ingester from accepting new writes.
+ #[tokio::test]
+ async fn test_rpc_write_persist_saturation() {
+ let mock = Arc::new(MockDmlSink::default().with_apply_return(vec![Ok(()), Ok(())]));
+ let timestamp = Arc::new(TimestampOracle::new(0));
+ let persist_state = Default::default();
+ let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Arc::clone(&persist_state));
+
+ let req = proto::WriteRequest {
+ payload: Some(DatabaseBatch {
+ database_id: NAMESPACE_ID.get(),
+ partition_key: PARTITION_KEY.to_string(),
+ table_batches: vec![TableBatch {
+ table_id: 42,
+ columns: vec![Column {
+ column_name: "time".to_string(),
+ semantic_type: SemanticType::Time.into(),
+ values: Some(Values {
+ i64_values: vec![4242],
+ f64_values: vec![],
+ u64_values: vec![],
+ string_values: vec![],
+ bool_values: vec![],
+ bytes_values: vec![],
+ packed_string_values: None,
+ interned_string_values: None,
+ }),
+ null_mask: vec![0],
+ }],
+ row_count: 1,
+ }],
+ }),
+ };
+
+ handler
+ .write(Request::new(req.clone()))
+ .await
+ .expect("write should succeed");
+
+ persist_state.test_set_state(CurrentState::Saturated);
+
+ let err = handler
+ .write(Request::new(req))
+ .await
+ .expect_err("write should fail");
+
+ // Validate the error code returned to the user.
+ assert_eq!(err.code(), Code::ResourceExhausted);
+
+ // One write should have been passed through to the DML sinks.
+ assert_matches!(*mock.get_calls(), [DmlOperation::Write(_)]);
+ }
}
diff --git a/schema/src/sort.rs b/schema/src/sort.rs
index 4aa12b130e..3eb1c50a27 100644
--- a/schema/src/sort.rs
+++ b/schema/src/sort.rs
@@ -386,7 +386,7 @@ pub fn compute_sort_key<'a>(
builder = builder.with_col(TIME_COLUMN_NAME);
let sort_key = builder.build();
- debug!(?primary_key, ?sort_key, "Computed sort key");
+ debug!(?primary_key, ?sort_key, "computed sort key");
sort_key
}
@@ -519,7 +519,7 @@ pub fn adjust_sort_key_columns(
input_catalog_sort_key=?catalog_sort_key,
output_chunk_sort_key=?metadata_sort_key,
output_catalog_sort_key=?catalog_update,
- "Adjusted sort key");
+ "adjusted sort key");
(metadata_sort_key, catalog_update)
}
|
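A condensed sketch of the back-pressure mechanism described in the commit above: a cheap shared flag checked on every write, plus the half-capacity rule used when deciding the queues have drained. SaturationFlag and handle_write are illustrative stand-ins for PersistState and the RPC write handler; has_sufficient_capacity mirrors the helper in the diff.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

#[derive(Default)]
struct SaturationFlag(AtomicBool);

impl SaturationFlag {
    // Hot path: a relaxed load keeps the per-write cost negligible.
    fn is_saturated(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }
    fn set(&self, saturated: bool) {
        self.0.store(saturated, Ordering::SeqCst);
    }
}

// Recovery rule: a queue is "drained enough" once at least half of its
// capacity is free again.
fn has_sufficient_capacity(free: usize, max: usize) -> bool {
    free >= (max + 1) / 2
}

fn handle_write(state: &Arc<SaturationFlag>) -> Result<(), &'static str> {
    if state.is_saturated() {
        // Maps to a gRPC "resource exhausted" status in the real handler.
        return Err("ingester overloaded");
    }
    Ok(())
}

fn main() {
    let state = Arc::new(SaturationFlag::default());
    assert!(handle_write(&state).is_ok());
    state.set(true);
    assert_eq!(handle_write(&state), Err("ingester overloaded"));
    state.set(false);
    assert!(handle_write(&state).is_ok());
    assert!(has_sufficient_capacity(3, 5));
    assert!(!has_sufficient_capacity(2, 5));
}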
67903a4bf2d0bffd6c49e3ee933282a14081d19c
|
Dom Dwyer
|
2023-02-02 14:52:09
|
ingester2 WAL replay
|
Adds two metrics:
* Number of files replayed (counted at the start of replay, not at completion)
* Number of applied ops
This will help identify when WAL replay is happening (an indication of
an ungraceful shutdown & potential temporary read unavailability).
| null |
feat(metrics): ingester2 WAL replay
Adds two metrics:
* Number of files replayed (counted at the start of replay, not at completion)
* Number of applied ops
This will help identify when WAL replay is happening (an indication of
an ungraceful shutdown & potential temporary read unavailability).
|
diff --git a/ingester2/benches/wal.rs b/ingester2/benches/wal.rs
index 5d197b78d9..cb81c310d3 100644
--- a/ingester2/benches/wal.rs
+++ b/ingester2/benches/wal.rs
@@ -64,9 +64,14 @@ fn wal_replay_bench(c: &mut Criterion) {
let persist = ingester2::persist::queue::benches::MockPersistQueue::default();
// Replay the wal into the NOP.
- ingester2::benches::replay(&wal, &sink, Arc::new(persist))
- .await
- .expect("WAL replay error");
+ ingester2::benches::replay(
+ &wal,
+ &sink,
+ Arc::new(persist),
+ &metric::Registry::default(),
+ )
+ .await
+ .expect("WAL replay error");
},
// Use the WAL for one test invocation only, and re-create a new one
// for the next iteration.
diff --git a/ingester2/src/init.rs b/ingester2/src/init.rs
index e179c22915..c2a329be42 100644
--- a/ingester2/src/init.rs
+++ b/ingester2/src/init.rs
@@ -320,9 +320,10 @@ where
let wal = Wal::new(wal_directory).await.map_err(InitError::WalInit)?;
// Replay the WAL log files, if any.
- let max_sequence_number = wal_replay::replay(&wal, &buffer, Arc::clone(&persist_handle))
- .await
- .map_err(|e| InitError::WalReplay(e.into()))?;
+ let max_sequence_number =
+ wal_replay::replay(&wal, &buffer, Arc::clone(&persist_handle), &metrics)
+ .await
+ .map_err(|e| InitError::WalReplay(e.into()))?;
// Build the chain of DmlSink that forms the write path.
let write_path = DmlSinkInstrumentation::new(
diff --git a/ingester2/src/init/wal_replay.rs b/ingester2/src/init/wal_replay.rs
index 2cba5dd743..a8bdad10ab 100644
--- a/ingester2/src/init/wal_replay.rs
+++ b/ingester2/src/init/wal_replay.rs
@@ -1,6 +1,7 @@
use data_types::{NamespaceId, PartitionKey, Sequence, SequenceNumber, TableId};
use dml::{DmlMeta, DmlOperation, DmlWrite};
use generated_types::influxdata::iox::wal::v1::sequenced_wal_op::Op;
+use metric::U64Counter;
use mutable_batch_pb::decode::decode_database_batch;
use observability_deps::tracing::*;
use std::time::Instant;
@@ -47,6 +48,7 @@ pub async fn replay<T, P>(
wal: &Wal,
sink: &T,
persist: P,
+ metrics: &metric::Registry,
) -> Result<Option<SequenceNumber>, WalReplayError>
where
T: DmlSink + PartitionIter,
@@ -62,6 +64,24 @@ where
return Ok(None);
}
+ // Initialise metrics to track the progress of the WAL replay.
+ //
+ // The file count tracks the number of WAL files that have started
+ // replaying, as opposed to finished replaying - this gives us the ability
+ // to monitor WAL replays that hang or otherwise go wrong.
+ let file_count_metric = metrics
+ .register_metric::<U64Counter>(
+ "ingester_wal_replay_files_started",
+ "Number of WAL files that have started to be replayed",
+ )
+ .recorder(&[]);
+ let op_count_metric = metrics
+ .register_metric::<U64Counter>(
+ "ingester_wal_replay_ops",
+ "Number of operations successfully replayed from the WAL",
+ )
+ .recorder(&[]);
+
let n_files = files.len();
info!(n_files, "found wal files for replay");
@@ -74,6 +94,8 @@ where
// Map 0-based iter index to 1 based file count
let file_number = index + 1;
+ file_count_metric.inc(1);
+
// Read the segment
let reader = wal
.reader_for_segment(file.id())
@@ -90,7 +112,7 @@ where
);
// Replay this segment file
- match replay_file(reader, sink).await? {
+ match replay_file(reader, sink, &op_count_metric).await? {
v @ Some(_) => max_sequence = max_sequence.max(v),
None => {
// This file was empty and should be deleted.
@@ -159,6 +181,7 @@ where
async fn replay_file<T>(
mut file: wal::ClosedSegmentFileReader,
sink: &T,
+ op_count_metric: &U64Counter,
) -> Result<Option<SequenceNumber>, WalReplayError>
where
T: DmlSink,
@@ -227,6 +250,8 @@ where
sink.apply(DmlOperation::Write(op))
.await
.map_err(Into::<DmlError>::into)?;
+
+ op_count_metric.inc(1);
}
}
}
@@ -238,6 +263,7 @@ mod tests {
use assert_matches::assert_matches;
use async_trait::async_trait;
use data_types::{NamespaceId, PartitionId, PartitionKey, ShardId, TableId};
+ use metric::{Attributes, Metric};
use parking_lot::Mutex;
use wal::Wal;
@@ -384,7 +410,8 @@ mod tests {
partitions: vec![Arc::new(Mutex::new(partition))],
};
- let max_sequence_number = replay(&wal, &mock_iter, Arc::clone(&persist))
+ let metrics = metric::Registry::default();
+ let max_sequence_number = replay(&wal, &mock_iter, Arc::clone(&persist), &metrics)
.await
.expect("failed to replay WAL");
@@ -416,5 +443,21 @@ mod tests {
.expect("failed to initialise WAL");
assert_eq!(wal.closed_segments().len(), 1);
+
+ // Validate the expected metric values were populated.
+ let files = metrics
+ .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_files_started")
+ .expect("file counter not found")
+ .get_observer(&Attributes::from([]))
+ .expect("attributes not found")
+ .fetch();
+ assert_eq!(files, 2);
+ let ops = metrics
+ .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_ops")
+ .expect("file counter not found")
+ .get_observer(&Attributes::from([]))
+ .expect("attributes not found")
+ .fetch();
+ assert_eq!(ops, 3);
}
}
|
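A minimal sketch of where the two replay counters described above are incremented, using plain atomics in place of the in-house metric crate. The counter names mirror the metrics registered in the diff; the replay body itself is a placeholder.

use std::sync::atomic::{AtomicU64, Ordering};

static WAL_REPLAY_FILES_STARTED: AtomicU64 = AtomicU64::new(0);
static WAL_REPLAY_OPS: AtomicU64 = AtomicU64::new(0);

fn replay(files: &[Vec<&str>]) {
    for ops in files {
        // Counted when replay of a file *starts*, so a replay that hangs
        // is still visible in the metric.
        WAL_REPLAY_FILES_STARTED.fetch_add(1, Ordering::Relaxed);
        for _op in ops {
            // ... decode and apply the op to the buffer tree ...
            WAL_REPLAY_OPS.fetch_add(1, Ordering::Relaxed);
        }
    }
}

fn main() {
    // Two segment files containing three ops in total, matching the test
    // expectations in the diff above.
    replay(&[vec!["w1", "w2"], vec!["w3"]]);
    assert_eq!(WAL_REPLAY_FILES_STARTED.load(Ordering::Relaxed), 2);
    assert_eq!(WAL_REPLAY_OPS.load(Ordering::Relaxed), 3);
}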
73339cfc574b23d312de450badd77701984955a5
|
Marco Neumann
|
2023-07-27 12:04:56
|
remove sqlx "used" metrics (#8336)
|
PR #8327 introduced a bunch of metrics for the sqlx connection pool. One
of the metrics was the "used" metric, which was supposed to count
"currently in use" connections. In prod, however, this metric underflows to
a very large integer. It seems that the "acquire" callback is only used by sqlx for
re-used connections (i.e. for the transition from "idle" to "used").
Now we could try to work around it, but since there is no "close
connection" callback, I doubt it is possible to do this accurately.
Luckily though we don't really need that counter. sqlx already offers
"active" (defined as idle + used) and "idle", so getting "used" is just
the difference. I removed the "used" metric nevertheless because
"active" and "idle" are read independently from each other (based on atomic
integers) and are NOT guaranteed to be in-sync. Calculating the
difference within IOx however would give the illusion that they are. So
I leave this to the dashboard / alert / whatever, because there it is
usually understood that metrics are samples and may be out of sync for a
very short time.
A nice side effect of this change is that it simplifies the code quite a
bit.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
fix: remove sqlx "used" metrics (#8336)
PR #8327 introduced a bunch of metrics for the sqlx connection pool. One
of the metrics was the "used" metric, which was supposed to count
"currently in use" connections. In prod, however, this metric underflows to
a very large integer. It seems that the "acquire" callback is only used by sqlx for
re-used connections (i.e. for the transition from "idle" to "used").
Now we could try to work around it, but since there is no "close
connection" callback, I doubt it is possible to do this accurately.
Luckily though we don't really need that counter. sqlx already offers
"active" (defined as idle + used) and "idle", so getting "used" is just
the difference. I removed the "used" metric nevertheless because
"active" and "idle" are read independently from each other (based on atomic
integers) and are NOT guaranteed to be in-sync. Calculating the
difference within IOx however would give the illusion that they are. So
I leave this to the dashboard / alert / whatever, because there it is
usually understood that metrics are samples and may be out of sync for a
very short time.
A nice side effect of this change is that it simplifies the code quite a
bit.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index fa8f6b5bbf..9714a5496f 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -38,7 +38,7 @@ use sqlx::{
};
use sqlx_hotswap_pool::HotSwapPool;
use std::borrow::Cow;
-use std::collections::{BTreeMap, HashSet};
+use std::collections::HashSet;
use std::sync::atomic::{AtomicU64, Ordering};
use std::{collections::HashMap, fmt::Display, str::FromStr, sync::Arc, time::Duration};
@@ -339,19 +339,9 @@ struct PoolMetricsInner {
pool_id_gen: AtomicU64,
/// Set of known pools and their ID labels.
- pools: RwLock<BTreeMap<Arc<str>, KnownPool>>,
-}
-
-/// A registered sqlx pool.
-#[derive(Debug, Default)]
-struct KnownPool {
- /// Number of currently used connection.
- used: AtomicU64,
-
- /// sqlx pool.
///
/// Note: The pool is internally ref-counted via an [`Arc`]. Holding a reference does NOT prevent it from being closed.
- pool: RwLock<Option<sqlx::Pool<Postgres>>>,
+ pools: RwLock<Vec<(Arc<str>, sqlx::Pool<Postgres>)>>,
}
impl PoolMetrics {
@@ -360,54 +350,21 @@ impl PoolMetrics {
metrics.register_instrument("iox_catalog_postgres", Self::default)
}
- /// Generate new pool ID.
- ///
- /// Is is separate from [`register_pool`](Self::register_pool) because of the initialization dance that is required
- /// to set up a pool with the right [`before_acquire`](PgPoolOptions::before_acquire) and
- /// [`after_release`](PgPoolOptions::after_release) callbacks.
- fn new_pool_id(&self) -> Arc<str> {
+ /// Register a new pool.
+ fn register_pool(&self, pool: sqlx::Pool<Postgres>) {
let id = self
.state
.pool_id_gen
.fetch_add(1, Ordering::SeqCst)
.to_string()
.into();
- self.state
- .pools
- .write()
- .insert(Arc::clone(&id), KnownPool::default());
- id
- }
-
- /// Acquire connection.
- fn acquire(&self, id: &str) {
- let pools = self.state.pools.read();
- let Some(p) = pools.get(id) else {return};
- p.used.fetch_add(1, Ordering::SeqCst);
- }
-
- /// Release connection.
- fn release(&self, id: &str) {
- let pools = self.state.pools.read();
- let Some(p) = pools.get(id) else {return};
- p.used.fetch_sub(1, Ordering::SeqCst);
- }
-
- /// Register a new pool.
- fn register_pool(&self, id: &str, pool: sqlx::Pool<Postgres>) {
- let pools = self.state.pools.read();
- let Some(p) = pools.get(id) else {return};
- let mut p = p.pool.write();
- assert!(p.is_none(), "Pool with same ID already known");
- *p = Some(pool);
+ let mut pools = self.state.pools.write();
+ pools.push((id, pool));
}
/// Remove closed pools from given list.
- fn clean_pools(pools: &mut BTreeMap<Arc<str>, KnownPool>) {
- pools.retain(|_id, p| match p.pool.read().as_ref() {
- Some(p) => !p.is_closed(),
- None => true,
- });
+ fn clean_pools(pools: &mut Vec<(Arc<str>, sqlx::Pool<Postgres>)>) {
+ pools.retain(|(_id, p)| !p.is_closed());
}
}
@@ -433,10 +390,7 @@ impl Instrument for PoolMetrics {
"Number of connections within the postgres connection pool that sqlx uses",
MetricKind::U64Gauge,
);
- for (id, pool) in pools.iter() {
- let p = pool.pool.read();
- let Some(p) = p.as_ref() else {continue;};
-
+ for (id, p) in pools.iter() {
reporter.report_observation(
&Attributes::from([
("pool_id", Cow::Owned(id.as_ref().to_owned())),
@@ -465,13 +419,6 @@ impl Instrument for PoolMetrics {
]),
metric::Observation::U64Gauge(p.options().get_min_connections() as u64),
);
- reporter.report_observation(
- &Attributes::from([
- ("pool_id", Cow::Owned(id.as_ref().to_owned())),
- ("state", Cow::Borrowed("used")),
- ]),
- metric::Observation::U64Gauge(pool.used.load(Ordering::SeqCst)),
- );
}
reporter.finish_metric();
@@ -495,37 +442,15 @@ async fn new_raw_pool(
// the default is INFO, which is frankly surprising.
.log_statements(log::LevelFilter::Trace);
- let pool_id = metrics.new_pool_id();
-
let app_name = options.app_name.clone();
let app_name2 = options.app_name.clone(); // just to log below
let schema_name = options.schema_name.clone();
- let metrics_captured_1 = metrics.clone();
- let metrics_captured_2 = metrics.clone();
- let id_captured_1 = Arc::clone(&pool_id);
- let id_captured_2 = Arc::clone(&pool_id);
let pool = PgPoolOptions::new()
.min_connections(1)
.max_connections(options.max_conns)
.acquire_timeout(options.connect_timeout)
.idle_timeout(options.idle_timeout)
.test_before_acquire(true)
- .before_acquire(move |_c, _meta| {
- let metrics = metrics_captured_1.clone();
- let pool_id = Arc::clone(&id_captured_1);
- Box::pin(async move {
- metrics.acquire(&pool_id);
- Ok(true)
- })
- })
- .after_release(move |_c, _meta| {
- let metrics = metrics_captured_2.clone();
- let pool_id = Arc::clone(&id_captured_2);
- Box::pin(async move {
- metrics.release(&pool_id);
- Ok(true)
- })
- })
.after_connect(move |c, _meta| {
let app_name = app_name.to_owned();
let schema_name = schema_name.to_owned();
@@ -561,7 +486,7 @@ async fn new_raw_pool(
// name for cross-correlation between Conductor logs & database connections.
info!(application_name=%app_name2, "connected to config store");
- metrics.register_pool(&pool_id, pool.clone());
+ metrics.register_pool(pool.clone());
Ok(pool)
}
|
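A small sketch of the consumer-side arithmetic the commit body recommends: derive "used" from the two remaining gauges downstream rather than tracking it in IOx. Because "active" and "idle" are sampled independently, the subtraction should saturate to avoid the underflow described above.

fn used_connections(active: u64, idle: u64) -> u64 {
    // used = active - idle, clamped at zero for briefly inconsistent samples.
    active.saturating_sub(idle)
}

fn main() {
    assert_eq!(used_connections(10, 3), 7);
    // A momentarily stale sample pair must not wrap around to u64::MAX.
    assert_eq!(used_connections(3, 4), 0);
}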
1b8b3ae4c3628f676be3b91f4616e63b014b111f
|
Marco Neumann
|
2023-06-30 10:27:30
|
bundle projection schema calculation (#8108)
|
* refactor: convert projection mask earlier
* refactor: bundle projection schema calculation
Same as #8102 but for the projected schema. This now has a nice side
effect:
1. there is no longer a per chunk cache lookup
2. there is no longer ANY per chunk async computation
3. we no longer need an early pruning stage for the chunks (we used
to do that so we could throw away chunks before doing the more
expensive part of the chunk creation)
This nicely streamlines and simplifies the code.
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor: bundle projection schema calculation (#8108)
* refactor: convert projection mask earlier
* refactor: bundle projection schema calculation
Same as #8102 but for the projected schema. This now has a nice side
effect:
1. there is no longer a per chunk cache lookup
2. there is no longer ANY per chunk async computation
3. we no longer need an early pruning stage for the chunks (we used
to do that so we could throw away chunks before doing the more
expensive part of the chunk creation)
This nicely streamlines and simplifies the code.
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 2fc4913dbd..26c93efdcb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4395,6 +4395,7 @@ dependencies = [
"datafusion_util",
"futures",
"generated_types",
+ "hashbrown 0.14.0",
"influxdb_iox_client",
"ingester_query_grpc",
"insta",
diff --git a/querier/Cargo.toml b/querier/Cargo.toml
index 8162899f6d..20ae5b31e7 100644
--- a/querier/Cargo.toml
+++ b/querier/Cargo.toml
@@ -17,6 +17,7 @@ data_types = { path = "../data_types" }
datafusion = { workspace = true }
datafusion_util = { path = "../datafusion_util" }
futures = "0.3"
+hashbrown = { version = "0.14.0" }
influxdb_iox_client = { path = "../influxdb_iox_client" }
iox_catalog = { path = "../iox_catalog" }
iox_query = { path = "../iox_query" }
diff --git a/querier/src/cache/projected_schema.rs b/querier/src/cache/projected_schema.rs
index 70fe960074..d77ab21ba7 100644
--- a/querier/src/cache/projected_schema.rs
+++ b/querier/src/cache/projected_schema.rs
@@ -36,13 +36,13 @@ impl CacheKey {
/// Create new key.
///
/// This normalizes `projection`.
- fn new(table_id: TableId, mut projection: Vec<ColumnId>) -> Self {
+ fn new(table_id: TableId, mut projection: Box<[ColumnId]>) -> Self {
// normalize column order
projection.sort();
Self {
table_id,
- projection: projection.into(),
+ projection,
}
}
@@ -141,7 +141,7 @@ impl ProjectedSchemaCache {
pub async fn get(
&self,
table: Arc<CachedTable>,
- projection: Vec<ColumnId>,
+ projection: Box<[ColumnId]>,
span: Option<Span>,
) -> Schema {
let key = CacheKey::new(table.id, projection);
@@ -249,7 +249,7 @@ mod tests {
let projection_1 = cache
.get(
Arc::clone(&table_1a),
- vec![ColumnId::new(1), ColumnId::new(2)],
+ [ColumnId::new(1), ColumnId::new(2)].into(),
None,
)
.await;
@@ -259,7 +259,7 @@ mod tests {
let projection_2 = cache
.get(
Arc::clone(&table_1a),
- vec![ColumnId::new(1), ColumnId::new(2)],
+ [ColumnId::new(1), ColumnId::new(2)].into(),
None,
)
.await;
@@ -269,7 +269,7 @@ mod tests {
let projection_3 = cache
.get(
Arc::clone(&table_1b),
- vec![ColumnId::new(1), ColumnId::new(2)],
+ [ColumnId::new(1), ColumnId::new(2)].into(),
None,
)
.await;
@@ -279,7 +279,7 @@ mod tests {
let projection_4 = cache
.get(
Arc::clone(&table_1a),
- vec![ColumnId::new(2), ColumnId::new(1)],
+ [ColumnId::new(2), ColumnId::new(1)].into(),
None,
)
.await;
@@ -290,7 +290,7 @@ mod tests {
let projection_5 = cache
.get(
Arc::clone(&table_1a),
- vec![ColumnId::new(1), ColumnId::new(3)],
+ [ColumnId::new(1), ColumnId::new(3)].into(),
None,
)
.await;
@@ -300,7 +300,7 @@ mod tests {
let projection_6 = cache
.get(
Arc::clone(&table_2a),
- vec![ColumnId::new(1), ColumnId::new(2)],
+ [ColumnId::new(1), ColumnId::new(2)].into(),
None,
)
.await;
@@ -311,7 +311,7 @@ mod tests {
let projection_7 = cache
.get(
Arc::clone(&table_1a),
- vec![ColumnId::new(1), ColumnId::new(2)],
+ [ColumnId::new(1), ColumnId::new(2)].into(),
None,
)
.await;
diff --git a/querier/src/parquet/creation.rs b/querier/src/parquet/creation.rs
index 09c2a85eae..c28618a05c 100644
--- a/querier/src/parquet/creation.rs
+++ b/querier/src/parquet/creation.rs
@@ -1,25 +1,18 @@
-use std::{
- collections::{HashMap, HashSet},
- sync::Arc,
-};
+use std::{collections::HashMap, sync::Arc};
-use data_types::{ChunkId, ChunkOrder, ColumnId, ParquetFile, PartitionId, TimestampMinMax};
+use data_types::{ChunkId, ChunkOrder, ColumnId, ParquetFile, PartitionId};
use futures::StreamExt;
+use hashbrown::HashSet;
use iox_catalog::interface::Catalog;
-use iox_query::pruning::prune_summaries;
-use observability_deps::tracing::debug;
use parquet_file::chunk::ParquetChunk;
-use predicate::Predicate;
use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng};
-use schema::sort::SortKey;
+use schema::{sort::SortKey, Schema};
use trace::span::{Span, SpanRecorder};
use uuid::Uuid;
use crate::{
cache::{namespace::CachedTable, partition::CachedPartition, CatalogCache},
- df_stats::create_chunk_statistics,
parquet::QuerierParquetChunkMeta,
- table::MetricPruningObserver,
CONCURRENT_CHUNK_CREATION_JOBS,
};
@@ -63,160 +56,108 @@ impl ChunkAdapter {
&self,
cached_table: Arc<CachedTable>,
files: Arc<[Arc<ParquetFile>]>,
- predicate: &Predicate,
- early_pruning_observer: MetricPruningObserver,
cached_partitions: &HashMap<PartitionId, CachedPartition>,
span: Option<Span>,
) -> Vec<QuerierParquetChunk> {
let span_recorder = SpanRecorder::new(span);
- // throw out files that belong to removed partitions
- let files = files
- .iter()
- .filter(|f| cached_partitions.contains_key(&f.partition_id))
- .cloned()
- .collect::<Vec<_>>();
-
- let chunk_stats: Vec<_> = {
- let _span_recorder = span_recorder.child("create chunk stats");
+ // prepare files
+ let files = {
+ let _span_recorder = span_recorder.child("prepare files");
files
.iter()
- .map(|p| {
- let stats = Arc::new(create_chunk_statistics(
- p.row_count as u64,
- &cached_table.schema,
- TimestampMinMax {
- min: p.min_time.get(),
- max: p.max_time.get(),
- },
- &cached_partitions
- .get(&p.partition_id)
- .expect("filter files down to existing partitions")
- .column_ranges,
- ));
- let schema = Arc::clone(cached_table.schema.inner());
-
- (stats, schema)
- })
- .collect()
+ // throw out files that belong to removed partitions
+ .filter(|f| cached_partitions.contains_key(&f.partition_id))
+ .cloned()
+ .map(|f| PreparedParquetFile::new(f, &cached_table))
+ .collect::<Vec<_>>()
};
- // Prune on the most basic summary data (timestamps and column names) before trying to fully load the chunks
- let keeps = {
- let _span_recorder = span_recorder.child("prune summaries");
-
- match prune_summaries(&cached_table.schema, &chunk_stats, predicate) {
- Ok(keeps) => keeps,
- Err(reason) => {
- // Ignore pruning failures here - the chunk pruner should have already logged them.
- // Just skip pruning and gather all the metadata. We have another chance to prune them
- // once all the metadata is available
- debug!(?reason, "Could not prune before metadata fetch");
- vec![true; chunk_stats.len()]
- }
+ // find all projected schemas
+ let projections = {
+ let span_recorder = span_recorder.child("get projected schemas");
+ let mut projections: HashSet<Box<[ColumnId]>> = HashSet::with_capacity(files.len());
+ for f in &files {
+ projections.get_or_insert_owned(&f.col_list);
}
- };
-
- // Remove any unused parquet files up front to maximize the
- // concurrent catalog requests that could be outstanding
- let mut parquet_files = files
- .iter()
- .zip(keeps)
- .filter_map(|(pf, keep)| {
- if keep {
- Some(Arc::clone(pf))
- } else {
- early_pruning_observer
- .was_pruned_early(pf.row_count as u64, pf.file_size_bytes as u64);
- None
- }
- })
- .collect::<Vec<_>>();
-
- // de-correlate parquet files so that subsequent items likely don't block/wait on the same cache lookup
- // (they are likely ordered by partition)
- //
- // Note that we sort before shuffling to achieve a deterministic pseudo-random order
- {
- let _span_recorder = span_recorder.child("shuffle order");
+ // de-correlate projections so that subsequent items likely don't block/wait on the same cache lookup
+ // (they are likely ordered by partition)
+ //
+ // Note that we sort before shuffling to achieve a deterministic pseudo-random order
+ let mut projections = projections.into_iter().collect::<Vec<_>>();
+ projections.sort();
let mut rng = StdRng::seed_from_u64(cached_table.id.get() as u64);
- parquet_files.sort_by_key(|f| f.id);
- parquet_files.shuffle(&mut rng);
- }
+ projections.shuffle(&mut rng);
- {
- let span_recorder = span_recorder.child("create individual chunks");
-
- futures::stream::iter(parquet_files)
- .map(|cached_parquet_file| {
+ futures::stream::iter(projections)
+ .map(|column_ids| {
let span_recorder = &span_recorder;
let cached_table = Arc::clone(&cached_table);
- let cached_partition = cached_partitions
- .get(&cached_parquet_file.partition_id)
- .expect("filter files down to existing partitions");
async move {
- let span = span_recorder.child_span("new_chunk");
- self.new_chunk(cached_table, cached_parquet_file, cached_partition, span)
- .await
+ let schema = self
+ .catalog_cache
+ .projected_schema()
+ .get(
+ cached_table,
+ column_ids.clone(),
+ span_recorder.child_span("cache GET projected schema"),
+ )
+ .await;
+ (column_ids, schema)
}
})
.buffer_unordered(CONCURRENT_CHUNK_CREATION_JOBS)
- .filter_map(|x| async { x })
- .collect()
+ .collect::<HashMap<_, _>>()
.await
+ };
+
+ {
+ let _span_recorder = span_recorder.child("finalize chunks");
+
+ files
+ .into_iter()
+ .map(|file| {
+ let cached_table = Arc::clone(&cached_table);
+ let schema = projections
+ .get(&file.col_list)
+ .expect("looked up all projections")
+ .clone();
+ let cached_partition = cached_partitions
+ .get(&file.file.partition_id)
+ .expect("filter files down to existing partitions");
+ self.new_chunk(cached_table, file, schema, cached_partition)
+ })
+ .collect()
}
}
- async fn new_chunk(
+ fn new_chunk(
&self,
cached_table: Arc<CachedTable>,
- parquet_file: Arc<ParquetFile>,
+ parquet_file: PreparedParquetFile,
+ schema: Schema,
cached_partition: &CachedPartition,
- span: Option<Span>,
- ) -> Option<QuerierParquetChunk> {
- let span_recorder = SpanRecorder::new(span);
-
- let parquet_file_cols: HashSet<ColumnId> =
- parquet_file.column_set.iter().copied().collect();
-
- let partition_sort_key = cached_partition
- .sort_key
- .as_ref()
- .expect("partition sort key should be set when a parquet file exists");
-
+ ) -> QuerierParquetChunk {
// NOTE: Because we've looked up the sort key AFTER the namespace schema, it may contain columns for which we
// don't have any schema information yet. This is OK because we've ensured that all file columns are known
// withing the schema and if a column is NOT part of the file, it will also not be part of the chunk sort
// key, so we have consistency here.
- // calculate schema
- // IMPORTANT: Do NOT use the sort key to list columns because the sort key only contains primary-key columns.
- // NOTE: The schema that we calculate here may have a different column order than the actual parquet file. This
+ // NOTE: The schema that we've projected here may have a different column order than the actual parquet file. This
// is OK because the IOx parquet reader can deal with that (see #4921).
- let column_ids: Vec<_> = cached_table
- .column_id_map
- .keys()
- .filter(|id| parquet_file_cols.contains(id))
- .copied()
- .collect();
- let schema = self
- .catalog_cache
- .projected_schema()
- .get(
- Arc::clone(&cached_table),
- column_ids,
- span_recorder.child_span("cache GET projected schema"),
- )
- .await;
// calculate sort key
+ let partition_sort_key = cached_partition
+ .sort_key
+ .as_ref()
+ .expect("partition sort key should be set when a parquet file exists");
let sort_key = SortKey::from_columns(
partition_sort_key
.column_order
.iter()
- .filter(|c_id| parquet_file_cols.contains(c_id))
+ .filter(|c_id| parquet_file.col_set.contains(*c_id))
.filter_map(|c_id| cached_table.column_id_map.get(c_id))
.cloned(),
);
@@ -225,27 +166,59 @@ impl ChunkAdapter {
"Sort key can never be empty because there should at least be a time column",
);
- let chunk_id = ChunkId::from(Uuid::from_u128(parquet_file.id.get() as _));
+ let chunk_id = ChunkId::from(Uuid::from_u128(parquet_file.file.id.get() as _));
- let order = ChunkOrder::new(parquet_file.max_l0_created_at.get());
+ let order = ChunkOrder::new(parquet_file.file.max_l0_created_at.get());
let meta = Arc::new(QuerierParquetChunkMeta {
chunk_id,
order,
sort_key: Some(sort_key),
- partition_id: parquet_file.partition_id,
+ partition_id: parquet_file.file.partition_id,
});
let parquet_chunk = Arc::new(ParquetChunk::new(
- parquet_file,
+ parquet_file.file,
schema,
self.catalog_cache.parquet_store(),
));
- Some(QuerierParquetChunk::new(
+ QuerierParquetChunk::new(
parquet_chunk,
meta,
Arc::clone(&cached_partition.column_ranges),
- ))
+ )
+ }
+}
+
+/// [`ParquetFile`] with some additional fields.
+struct PreparedParquetFile {
+ /// The parquet file as received from the catalog.
+ file: Arc<ParquetFile>,
+
+ /// The set of columns in this file.
+ col_set: HashSet<ColumnId>,
+
+ /// The columns in this file as ordered in the schema.
+ col_list: Box<[ColumnId]>,
+}
+
+impl PreparedParquetFile {
+ fn new(file: Arc<ParquetFile>, cached_table: &CachedTable) -> Self {
+ let col_set: HashSet<ColumnId> = file
+ .column_set
+ .iter()
+ .filter(|id| cached_table.column_id_map.contains_key(*id))
+ .copied()
+ .collect();
+
+ let mut col_list = col_set.iter().copied().collect::<Box<[ColumnId]>>();
+ col_list.sort();
+
+ Self {
+ file,
+ col_set,
+ col_list,
+ }
}
}
diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs
index a811561538..24ac694d78 100644
--- a/querier/src/parquet/mod.rs
+++ b/querier/src/parquet/mod.rs
@@ -97,12 +97,9 @@ impl QuerierParquetChunk {
pub mod tests {
use std::collections::HashMap;
- use crate::{
- cache::{
- namespace::{CachedNamespace, CachedTable},
- CatalogCache,
- },
- table::MetricPruningObserver,
+ use crate::cache::{
+ namespace::{CachedNamespace, CachedTable},
+ CatalogCache,
};
use super::*;
@@ -116,7 +113,6 @@ pub mod tests {
};
use iox_tests::{TestCatalog, TestParquetFileBuilder};
use metric::{Attributes, Observation, RawReporter};
- use predicate::Predicate;
use schema::{builder::SchemaBuilder, sort::SortKeyBuilder};
use test_helpers::maybe_start_logging;
use tokio::runtime::Handle;
@@ -258,8 +254,6 @@ pub mod tests {
.new_chunks(
Arc::clone(&self.cached_table),
vec![Arc::clone(&self.parquet_file)].into(),
- &Predicate::new(),
- MetricPruningObserver::new_unregistered(),
&cached_partitions,
None,
)
diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs
index 8b5bf4714d..25bbb08b7a 100644
--- a/querier/src/table/mod.rs
+++ b/querier/src/table/mod.rs
@@ -24,7 +24,6 @@ use trace::span::{Span, SpanRecorder};
use uuid::Uuid;
pub use self::query_access::metrics::PruneMetrics;
-pub(crate) use self::query_access::MetricPruningObserver;
mod query_access;
@@ -272,8 +271,6 @@ impl QuerierTable {
.new_chunks(
Arc::clone(cached_table),
Arc::clone(&parquet_files.files),
- predicate,
- MetricPruningObserver::new(Arc::clone(&self.prune_metrics)),
&cached_partitions,
span_recorder.child_span("new_chunks"),
)
diff --git a/querier/src/table/query_access/metrics.rs b/querier/src/table/query_access/metrics.rs
index 16f1638c67..a67078ef4b 100644
--- a/querier/src/table/query_access/metrics.rs
+++ b/querier/src/table/query_access/metrics.rs
@@ -124,9 +124,4 @@ impl PruneMetrics {
could_not_prune_df,
}
}
-
- #[cfg(test)]
- pub(crate) fn new_unregistered() -> Self {
- Self::new(&metric::Registry::new())
- }
}
diff --git a/querier/src/table/query_access/mod.rs b/querier/src/table/query_access/mod.rs
index ce1140612a..c49a9ee202 100644
--- a/querier/src/table/query_access/mod.rs
+++ b/querier/src/table/query_access/mod.rs
@@ -168,16 +168,6 @@ impl MetricPruningObserver {
pub(crate) fn new(metrics: Arc<PruneMetrics>) -> Self {
Self { metrics }
}
-
- #[cfg(test)]
- pub(crate) fn new_unregistered() -> Self {
- Self::new(Arc::new(PruneMetrics::new_unregistered()))
- }
-
- /// Called when pruning a chunk before fully creating the chunk structure
- pub(crate) fn was_pruned_early(&self, row_count: u64, size_estimate: u64) {
- self.metrics.pruned_early.inc(1, row_count, size_estimate);
- }
}
impl PruningObserver for MetricPruningObserver {
|
297ea8be55c46f9cf8864f73cd4b6e2495a6db98
|
Marco Neumann
|
2022-11-29 14:52:32
|
make `IOxSessionContext::exec` non-optional (#6266)
|
`None` was only used for testing and even then we should probably have a
proper executor instead of panicking for some methods.
Found while working on #6216.
| null |
refactor: make `IOxSessionContext::exec` non-optional (#6266)
`None` was only used for testing and even then we should probably have a
proper executor instead of panicking for some methods.
Found while working on #6216.
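Editor's note: a minimal sketch of the pattern the diff below introduces — one process-wide, lazily-initialized executor shared by every testing context, so `exec` never needs to be an `Option` and nothing panics. The `TestExecutor` struct is a hypothetical stand-in; the real code stores a `once_cell::sync::Lazy<DedicatedExecutor>` built with `DedicatedExecutor::new("testing", 1)`, and the sketch assumes the `once_cell` crate as a dependency.

use once_cell::sync::Lazy;

// Hypothetical stand-in for `executor::DedicatedExecutor`, just to keep the
// sketch self-contained and runnable.
#[derive(Clone, Debug)]
struct TestExecutor {
    name: &'static str,
    threads: usize,
}

impl TestExecutor {
    fn new(name: &'static str, threads: usize) -> Self {
        Self { name, threads }
    }
}

// One shared executor, created on first use and cloned into every testing
// context instead of carrying `Option<Executor>` and panicking on `None`.
static TESTING_EXECUTOR: Lazy<TestExecutor> =
    Lazy::new(|| TestExecutor::new("testing", 1));

fn with_testing() -> TestExecutor {
    TESTING_EXECUTOR.clone()
}

fn main() {
    let exec = with_testing();
    println!("executor {:?} with {} thread(s)", exec.name, exec.threads);
}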
|
diff --git a/Cargo.lock b/Cargo.lock
index d42b7b7e10..a4d691cc4c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2630,6 +2630,7 @@ dependencies = [
"itertools",
"object_store",
"observability_deps",
+ "once_cell",
"parking_lot 0.12.1",
"parquet_file",
"predicate",
diff --git a/iox_query/Cargo.toml b/iox_query/Cargo.toml
index cfd669a2fe..b7d1ce5d28 100644
--- a/iox_query/Cargo.toml
+++ b/iox_query/Cargo.toml
@@ -29,6 +29,7 @@ influxdb_influxql_parser = { path = "../influxdb_influxql_parser" }
itertools = "0.10.5"
object_store = "0.5.1"
observability_deps = { path = "../observability_deps" }
+once_cell = { version = "1.16.0", features = ["parking_lot"] }
parking_lot = "0.12"
parquet_file = { path = "../parquet_file" }
query_functions = { path = "../query_functions"}
@@ -42,4 +43,4 @@ workspace-hack = { path = "../workspace-hack"}
[dev-dependencies] # In alphabetical order
test_helpers = { path = "../test_helpers" }
-assert_matches = "1"
\ No newline at end of file
+assert_matches = "1"
diff --git a/iox_query/src/exec.rs b/iox_query/src/exec.rs
index a184fb3fc4..bfcbfe46ba 100644
--- a/iox_query/src/exec.rs
+++ b/iox_query/src/exec.rs
@@ -166,7 +166,7 @@ impl Executor {
let inner = SessionContext::with_state(state.clone());
let exec = self.executor(executor_type).clone();
let recorder = SpanRecorder::new(state.span_ctx().child_span("Query Execution"));
- IOxSessionContext::new(inner, Some(exec), recorder)
+ IOxSessionContext::new(inner, exec, recorder)
}
/// Create a new execution context, suitable for executing a new query or system task
diff --git a/iox_query/src/exec/context.rs b/iox_query/src/exec/context.rs
index 2abc963b6b..77a062a694 100644
--- a/iox_query/src/exec/context.rs
+++ b/iox_query/src/exec/context.rs
@@ -44,6 +44,7 @@ use datafusion_util::config::{iox_session_config, DEFAULT_CATALOG};
use executor::DedicatedExecutor;
use futures::{Stream, StreamExt, TryStreamExt};
use observability_deps::tracing::debug;
+use once_cell::sync::Lazy;
use query_functions::selectors::register_selector_aggregates;
use std::{convert::TryInto, fmt, sync::Arc};
use trace::{
@@ -218,7 +219,7 @@ impl IOxSessionConfig {
let maybe_span = self.span_ctx.child_span("Query Execution");
- IOxSessionContext::new(inner, Some(self.exec), SpanRecorder::new(maybe_span))
+ IOxSessionContext::new(inner, self.exec, SpanRecorder::new(maybe_span))
}
}
@@ -237,13 +238,13 @@ impl IOxSessionConfig {
pub struct IOxSessionContext {
inner: SessionContext,
- /// Optional dedicated executor for query execution.
+ /// Dedicated executor for query execution.
///
/// DataFusion plans are "CPU" bound and thus can consume tokio
/// executors threads for extended periods of time. We use a
/// dedicated tokio runtime to run them so that other requests
/// can be handled.
- exec: Option<DedicatedExecutor>,
+ exec: DedicatedExecutor,
/// Span context from which to create spans for this query
recorder: SpanRecorder,
@@ -253,10 +254,16 @@ impl fmt::Debug for IOxSessionContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("IOxSessionContext")
.field("inner", &"<DataFusion ExecutionContext>")
+ .field("exec", &self.exec)
+ .field("recorder", &self.recorder)
.finish()
}
}
+/// [`DedicatedExecutor`] for testing purposes.
+static TESTING_EXECUTOR: Lazy<DedicatedExecutor> =
+ Lazy::new(|| DedicatedExecutor::new("testing", 1));
+
impl IOxSessionContext {
/// Constructor for testing.
///
@@ -265,7 +272,7 @@ impl IOxSessionContext {
pub fn with_testing() -> Self {
Self {
inner: SessionContext::default(),
- exec: None,
+ exec: TESTING_EXECUTOR.clone(),
recorder: SpanRecorder::default(),
}
}
@@ -273,7 +280,7 @@ impl IOxSessionContext {
/// Private constructor
pub(crate) fn new(
inner: SessionContext,
- exec: Option<DedicatedExecutor>,
+ exec: DedicatedExecutor,
recorder: SpanRecorder,
) -> Self {
// attach span to DataFusion session
@@ -594,13 +601,10 @@ impl IOxSessionContext {
Fut: std::future::Future<Output = Result<T>> + Send + 'static,
T: Send + 'static,
{
- match &self.exec {
- Some(exec) => exec
- .spawn(fut)
- .await
- .unwrap_or_else(|e| Err(Error::Execution(format!("Join Error: {}", e)))),
- None => unimplemented!("spawn onto current threadpool"),
- }
+ self.exec
+ .spawn(fut)
+ .await
+ .unwrap_or_else(|e| Err(Error::Execution(format!("Join Error: {}", e))))
}
/// Returns a IOxSessionContext with a SpanRecorder that is a child of the current
@@ -629,7 +633,7 @@ impl IOxSessionContext {
/// Number of currently active tasks.
pub fn tasks(&self) -> usize {
- self.exec.as_ref().map(|e| e.tasks()).unwrap_or_default()
+ self.exec.tasks()
}
}
|
b783bb1967a5b7f5ab14a1240cdaa9693c01c0fd
|
Dom Dwyer
|
2023-05-22 14:56:45
|
add missing lints to service_grpc_influxrpc
|
Adds the standard lints to service_grpc_influxrpc and fixes any lint
failures.
Note this doesn't include the normal "document things" lint, because
there's a load of missing docs
| null |
refactor(lints): add missing lints to service_grpc_influxrpc
Adds the standard lints to service_grpc_influxrpc and fixes any lint
failures.
Note this doesn't include the normal "document things" lint, because
there's a load of missing docs
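Editor's note: for reference, the crate-level attribute block being rolled out (the same one the diff below adds to `service_grpc_influxrpc/src/lib.rs`) looks roughly like this when dropped into a crate root; treat it as a sketch of the "standard lints", not a canonical list.

// crate root (e.g. main.rs of a scratch crate) carrying the standard lint block
#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
#![allow(clippy::clone_on_ref_ptr)]
#![warn(
    missing_copy_implementations,
    missing_debug_implementations,
    clippy::explicit_iter_loop,
    clippy::future_not_send,
    clippy::clone_on_ref_ptr,
    clippy::todo,
    clippy::dbg_macro,
)]

fn main() {}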
|
diff --git a/service_grpc_influxrpc/src/expr.rs b/service_grpc_influxrpc/src/expr.rs
index 915c34310f..e3de7f3dfd 100644
--- a/service_grpc_influxrpc/src/expr.rs
+++ b/service_grpc_influxrpc/src/expr.rs
@@ -456,6 +456,7 @@ impl InListBuilder {
/// Decoded special tag key.
///
/// The storage gRPC layer uses magic special bytes to encode measurement name and field name as tag
+#[derive(Debug)]
pub enum DecodedTagKey {
Measurement,
Field,
diff --git a/service_grpc_influxrpc/src/lib.rs b/service_grpc_influxrpc/src/lib.rs
index ae4f3ee879..2869037412 100644
--- a/service_grpc_influxrpc/src/lib.rs
+++ b/service_grpc_influxrpc/src/lib.rs
@@ -1,4 +1,18 @@
-//! This module contains gRPC service implementation for "InfluxRPC" (aka the storage RPC API used for Flux and InfluxQL)
+//! This module contains gRPC service implementation for "InfluxRPC" (aka the
+//! storage RPC API used for Flux and InfluxQL)
+
+#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
+#![allow(clippy::clone_on_ref_ptr)]
+#![warn(
+ missing_copy_implementations,
+ missing_debug_implementations,
+ clippy::explicit_iter_loop,
+ // See https://github.com/influxdata/influxdb_iox/pull/1671
+ clippy::future_not_send,
+ clippy::clone_on_ref_ptr,
+ clippy::todo,
+ clippy::dbg_macro,
+)]
/// `[0x00]` is the magic value that that the storage gRPC layer uses to
/// encode a tag_key that means "measurement name"
diff --git a/service_grpc_influxrpc/src/permit.rs b/service_grpc_influxrpc/src/permit.rs
index 3041070d6d..58e5f41e6e 100644
--- a/service_grpc_influxrpc/src/permit.rs
+++ b/service_grpc_influxrpc/src/permit.rs
@@ -3,6 +3,7 @@ use pin_project::pin_project;
use tracker::InstrumentedAsyncOwnedSemaphorePermit;
/// Helper to keep a semaphore permit attached to a stream.
+#[derive(Debug)]
#[pin_project]
pub struct StreamWithPermit<S> {
#[pin]
diff --git a/service_grpc_influxrpc/src/query_completed_token.rs b/service_grpc_influxrpc/src/query_completed_token.rs
index 0c27e46a89..097877f978 100644
--- a/service_grpc_influxrpc/src/query_completed_token.rs
+++ b/service_grpc_influxrpc/src/query_completed_token.rs
@@ -7,9 +7,10 @@ use futures::{ready, Stream, StreamExt};
use iox_query::QueryCompletedToken;
/// Wraps an inner query stream, calling the `QueryCompletedToken::set_success` on success
+#[derive(Debug)]
pub struct QueryCompletedTokenStream<S, T, E>
where
- S: Stream<Item = Result<T, E>> + Unpin,
+ S: Stream<Item = Result<T, E>> + Unpin + Send,
{
inner: S,
token: QueryCompletedToken,
@@ -18,7 +19,7 @@ where
impl<S, T, E> QueryCompletedTokenStream<S, T, E>
where
- S: Stream<Item = Result<T, E>> + Unpin,
+ S: Stream<Item = Result<T, E>> + Unpin + Send,
{
pub fn new(inner: S, token: QueryCompletedToken) -> Self {
Self {
@@ -31,7 +32,7 @@ where
impl<S, T, E> Stream for QueryCompletedTokenStream<S, T, E>
where
- S: Stream<Item = Result<T, E>> + Unpin,
+ S: Stream<Item = Result<T, E>> + Unpin + Send,
{
type Item = Result<T, E>;
diff --git a/service_grpc_influxrpc/src/service.rs b/service_grpc_influxrpc/src/service.rs
index 2308593165..3c5cb15861 100644
--- a/service_grpc_influxrpc/src/service.rs
+++ b/service_grpc_influxrpc/src/service.rs
@@ -308,7 +308,7 @@ enum InfluxCode {
}
impl Display for InfluxCode {
- fn fmt(&self, f: &mut Formatter) -> FmtResult {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
let str = match self {
InfluxCode::EInternal => "internal error",
InfluxCode::ENotFound => "not found",
@@ -1705,7 +1705,7 @@ pub fn make_response<S, T, E>(
permit: InstrumentedAsyncOwnedSemaphorePermit,
) -> Result<Response<StreamWithPermit<QueryCompletedTokenStream<S, T, E>>>, Status>
where
- S: Stream<Item = Result<T, E>> + Unpin,
+ S: Stream<Item = Result<T, E>> + Unpin + Send,
{
let mut response = Response::new(StreamWithPermit::new(
QueryCompletedTokenStream::new(stream, token),
|
dcb4a9bb5c19acf850fd0bcfb9b9382b16ea521d
|
Marco Neumann
|
2023-06-29 13:02:48
|
fuse `QueryChunk` and `QueryChunkMeta` (#8107)
|
Closes #8095.
| null |
refactor: fuse `QueryChunk` and `QueryChunkMeta` (#8107)
Closes #8095.
|
diff --git a/compactor/src/components/df_planner/query_chunk.rs b/compactor/src/components/df_planner/query_chunk.rs
index a52a32080a..0e18e76d8c 100644
--- a/compactor/src/components/df_planner/query_chunk.rs
+++ b/compactor/src/components/df_planner/query_chunk.rs
@@ -6,7 +6,7 @@ use datafusion::{error::DataFusionError, physical_plan::Statistics};
use iox_query::{
exec::{stringset::StringSet, IOxSessionContext},
util::create_basic_summary,
- QueryChunk, QueryChunkData, QueryChunkMeta,
+ QueryChunk, QueryChunkData,
};
use observability_deps::tracing::debug;
use parquet_file::{chunk::ParquetChunk, storage::ParquetStorage};
@@ -64,7 +64,7 @@ impl QueryableParquetChunk {
}
}
-impl QueryChunkMeta for QueryableParquetChunk {
+impl QueryChunk for QueryableParquetChunk {
fn stats(&self) -> Arc<Statistics> {
Arc::clone(&self.stats)
}
@@ -80,9 +80,7 @@ impl QueryChunkMeta for QueryableParquetChunk {
fn sort_key(&self) -> Option<&SortKey> {
self.sort_key.as_ref()
}
-}
-impl QueryChunk for QueryableParquetChunk {
// This function is needed to distinguish the ParquetChunks further if they happen to have the
// same creation order.
// Ref: chunks.sort_unstable_by_key(|c| (c.order(), c.id())); in provider.rs
diff --git a/ingester/src/persist/compact.rs b/ingester/src/persist/compact.rs
index 6a78cadd16..c9240c2834 100644
--- a/ingester/src/persist/compact.rs
+++ b/ingester/src/persist/compact.rs
@@ -4,7 +4,7 @@ use datafusion::physical_plan::SendableRecordBatchStream;
use iox_query::{
exec::{Executor, ExecutorType},
frontend::reorg::ReorgPlanner,
- QueryChunk, QueryChunkMeta,
+ QueryChunk,
};
use schema::sort::{adjust_sort_key_columns, compute_sort_key, SortKey};
diff --git a/ingester/src/persist/handle.rs b/ingester/src/persist/handle.rs
index 28dd8c0a42..e823767ef2 100644
--- a/ingester/src/persist/handle.rs
+++ b/ingester/src/persist/handle.rs
@@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration};
use async_trait::async_trait;
use iox_catalog::interface::Catalog;
-use iox_query::{exec::Executor, QueryChunkMeta};
+use iox_query::{exec::Executor, QueryChunk};
use metric::{DurationHistogram, DurationHistogramOptions, U64Counter, U64Gauge, DURATION_MAX};
use observability_deps::tracing::*;
use parking_lot::Mutex;
diff --git a/ingester/src/query_adaptor.rs b/ingester/src/query_adaptor.rs
index 31b8cf6b48..f1574aa08b 100644
--- a/ingester/src/query_adaptor.rs
+++ b/ingester/src/query_adaptor.rs
@@ -10,7 +10,7 @@ use datafusion::{error::DataFusionError, physical_plan::Statistics};
use iox_query::{
exec::{stringset::StringSet, IOxSessionContext},
util::{compute_timenanosecond_min_max, create_basic_summary},
- QueryChunk, QueryChunkData, QueryChunkMeta,
+ QueryChunk, QueryChunkData,
};
use once_cell::sync::OnceCell;
use predicate::Predicate;
@@ -109,7 +109,7 @@ impl QueryAdaptor {
}
}
-impl QueryChunkMeta for QueryAdaptor {
+impl QueryChunk for QueryAdaptor {
fn stats(&self) -> Arc<Statistics> {
Arc::clone(self.stats.get_or_init(|| {
let ts_min_max = compute_timenanosecond_min_max(self.data.iter().map(|b| b.as_ref()))
@@ -134,9 +134,7 @@ impl QueryChunkMeta for QueryAdaptor {
fn sort_key(&self) -> Option<&SortKey> {
None // Ingester data is not sorted
}
-}
-impl QueryChunk for QueryAdaptor {
fn id(&self) -> ChunkId {
self.id
}
diff --git a/iox_query/src/frontend.rs b/iox_query/src/frontend.rs
index 1bde776b76..1f93902130 100644
--- a/iox_query/src/frontend.rs
+++ b/iox_query/src/frontend.rs
@@ -20,7 +20,7 @@ mod test {
frontend::reorg::ReorgPlanner,
provider::{DeduplicateExec, RecordBatchesExec},
test::{format_execution_plan, TestChunk},
- QueryChunk, QueryChunkMeta, ScanPlanBuilder,
+ QueryChunk, ScanPlanBuilder,
};
/// A macro to asserts the contents of the extracted metrics is reasonable
diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs
index 28ee02a02b..c58efdc357 100644
--- a/iox_query/src/lib.rs
+++ b/iox_query/src/lib.rs
@@ -59,9 +59,8 @@ pub fn chunk_order_field() -> Arc<Field> {
Arc::clone(&CHUNK_ORDER_FIELD)
}
-/// Trait for an object (designed to be a Chunk) which can provide
-/// metadata
-pub trait QueryChunkMeta {
+/// A single chunk of data.
+pub trait QueryChunk: Debug + Send + Sync + 'static {
/// Return a statistics of the data
fn stats(&self) -> Arc<Statistics>;
@@ -73,6 +72,51 @@ pub trait QueryChunkMeta {
/// return a reference to the sort key if any
fn sort_key(&self) -> Option<&SortKey>;
+
+ /// returns the Id of this chunk. Ids are unique within a
+ /// particular partition.
+ fn id(&self) -> ChunkId;
+
+ /// Returns true if the chunk may contain a duplicate "primary
+ /// key" within itself
+ fn may_contain_pk_duplicates(&self) -> bool;
+
+ /// Returns a set of Strings with column names from the specified
+ /// table that have at least one row that matches `predicate`, if
+ /// the predicate can be evaluated entirely on the metadata of
+ /// this Chunk. Returns `None` otherwise
+ fn column_names(
+ &self,
+ ctx: IOxSessionContext,
+ predicate: &Predicate,
+ columns: Projection<'_>,
+ ) -> Result<Option<StringSet>, DataFusionError>;
+
+ /// Return a set of Strings containing the distinct values in the
+ /// specified columns. If the predicate can be evaluated entirely
+ /// on the metadata of this Chunk. Returns `None` otherwise
+ ///
+ /// The requested columns must all have String type.
+ fn column_values(
+ &self,
+ ctx: IOxSessionContext,
+ column_name: &str,
+ predicate: &Predicate,
+ ) -> Result<Option<StringSet>, DataFusionError>;
+
+ /// Provides access to raw [`QueryChunk`] data.
+ ///
+ /// The engine assume that minimal work shall be performed to gather the `QueryChunkData`.
+ fn data(&self) -> QueryChunkData;
+
+ /// Returns chunk type. Useful in tests and debug logs.
+ fn chunk_type(&self) -> &str;
+
+ /// Order of this chunk relative to other overlapping chunks.
+ fn order(&self) -> ChunkOrder;
+
+ /// Return backend as [`Any`] which can be used to downcast to a specific implementation.
+ fn as_any(&self) -> &dyn Any;
}
/// A `QueryCompletedToken` is returned by `record_query` implementations of
@@ -176,7 +220,7 @@ pub trait QueryNamespace: QueryNamespaceMeta + Debug + Send + Sync {
pub enum QueryChunkData {
/// In-memory record batches.
///
- /// **IMPORTANT: All batches MUST have the schema that the [chunk reports](QueryChunkMeta::schema).**
+ /// **IMPORTANT: All batches MUST have the schema that the [chunk reports](QueryChunk::schema).**
RecordBatches(Vec<RecordBatch>),
/// Parquet file.
@@ -210,59 +254,71 @@ impl QueryChunkData {
}
}
-/// Collection of data that shares the same partition key
-pub trait QueryChunk: QueryChunkMeta + Debug + Send + Sync + 'static {
- /// returns the Id of this chunk. Ids are unique within a
- /// particular partition.
- fn id(&self) -> ChunkId;
+impl<P> QueryChunk for Arc<P>
+where
+ P: QueryChunk,
+{
+ fn stats(&self) -> Arc<Statistics> {
+ self.as_ref().stats()
+ }
- /// Returns true if the chunk may contain a duplicate "primary
- /// key" within itself
- fn may_contain_pk_duplicates(&self) -> bool;
+ fn schema(&self) -> &Schema {
+ self.as_ref().schema()
+ }
+
+ fn partition_id(&self) -> PartitionId {
+ self.as_ref().partition_id()
+ }
+
+ fn sort_key(&self) -> Option<&SortKey> {
+ self.as_ref().sort_key()
+ }
+
+ fn id(&self) -> ChunkId {
+ self.as_ref().id()
+ }
+
+ fn may_contain_pk_duplicates(&self) -> bool {
+ self.as_ref().may_contain_pk_duplicates()
+ }
- /// Returns a set of Strings with column names from the specified
- /// table that have at least one row that matches `predicate`, if
- /// the predicate can be evaluated entirely on the metadata of
- /// this Chunk. Returns `None` otherwise
fn column_names(
&self,
ctx: IOxSessionContext,
predicate: &Predicate,
columns: Projection<'_>,
- ) -> Result<Option<StringSet>, DataFusionError>;
+ ) -> Result<Option<StringSet>, DataFusionError> {
+ self.as_ref().column_names(ctx, predicate, columns)
+ }
- /// Return a set of Strings containing the distinct values in the
- /// specified columns. If the predicate can be evaluated entirely
- /// on the metadata of this Chunk. Returns `None` otherwise
- ///
- /// The requested columns must all have String type.
fn column_values(
&self,
ctx: IOxSessionContext,
column_name: &str,
predicate: &Predicate,
- ) -> Result<Option<StringSet>, DataFusionError>;
+ ) -> Result<Option<StringSet>, DataFusionError> {
+ self.as_ref().column_values(ctx, column_name, predicate)
+ }
- /// Provides access to raw [`QueryChunk`] data.
- ///
- /// The engine assume that minimal work shall be performed to gather the `QueryChunkData`.
- fn data(&self) -> QueryChunkData;
+ fn data(&self) -> QueryChunkData {
+ self.as_ref().data()
+ }
- /// Returns chunk type. Useful in tests and debug logs.
- fn chunk_type(&self) -> &str;
+ fn chunk_type(&self) -> &str {
+ self.as_ref().chunk_type()
+ }
- /// Order of this chunk relative to other overlapping chunks.
- fn order(&self) -> ChunkOrder;
+ fn order(&self) -> ChunkOrder {
+ self.as_ref().order()
+ }
- /// Return backend as [`Any`] which can be used to downcast to a specific implementation.
- fn as_any(&self) -> &dyn Any;
+ fn as_any(&self) -> &dyn Any {
+ // present the underlying implementation, not the wrapper
+ self.as_ref().as_any()
+ }
}
-/// Implement ChunkMeta for something wrapped in an Arc (like Chunks often are)
-impl<P> QueryChunkMeta for Arc<P>
-where
- P: QueryChunkMeta,
-{
+impl QueryChunk for Arc<dyn QueryChunk> {
fn stats(&self) -> Arc<Statistics> {
self.as_ref().stats()
}
@@ -278,24 +334,48 @@ where
fn sort_key(&self) -> Option<&SortKey> {
self.as_ref().sort_key()
}
-}
-/// Implement `ChunkMeta` for `Arc<dyn QueryChunk>`
-impl QueryChunkMeta for Arc<dyn QueryChunk> {
- fn stats(&self) -> Arc<Statistics> {
- self.as_ref().stats()
+ fn id(&self) -> ChunkId {
+ self.as_ref().id()
}
- fn schema(&self) -> &Schema {
- self.as_ref().schema()
+ fn may_contain_pk_duplicates(&self) -> bool {
+ self.as_ref().may_contain_pk_duplicates()
}
- fn partition_id(&self) -> PartitionId {
- self.as_ref().partition_id()
+ fn column_names(
+ &self,
+ ctx: IOxSessionContext,
+ predicate: &Predicate,
+ columns: Projection<'_>,
+ ) -> Result<Option<StringSet>, DataFusionError> {
+ self.as_ref().column_names(ctx, predicate, columns)
}
- fn sort_key(&self) -> Option<&SortKey> {
- self.as_ref().sort_key()
+ fn column_values(
+ &self,
+ ctx: IOxSessionContext,
+ column_name: &str,
+ predicate: &Predicate,
+ ) -> Result<Option<StringSet>, DataFusionError> {
+ self.as_ref().column_values(ctx, column_name, predicate)
+ }
+
+ fn data(&self) -> QueryChunkData {
+ self.as_ref().data()
+ }
+
+ fn chunk_type(&self) -> &str {
+ self.as_ref().chunk_type()
+ }
+
+ fn order(&self) -> ChunkOrder {
+ self.as_ref().order()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ // present the underlying implementation, not the wrapper
+ self.as_ref().as_any()
}
}
diff --git a/iox_query/src/physical_optimizer/chunk_extraction.rs b/iox_query/src/physical_optimizer/chunk_extraction.rs
index 14a554800d..9f5f341e33 100644
--- a/iox_query/src/physical_optimizer/chunk_extraction.rs
+++ b/iox_query/src/physical_optimizer/chunk_extraction.rs
@@ -167,9 +167,7 @@ impl ExecutionPlanVisitor for ExtractChunksVisitor {
#[cfg(test)]
mod tests {
- use crate::{
- provider::chunks_to_physical_nodes, test::TestChunk, util::df_physical_expr, QueryChunkMeta,
- };
+ use crate::{provider::chunks_to_physical_nodes, test::TestChunk, util::df_physical_expr};
use arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
use data_types::ChunkId;
use datafusion::{
diff --git a/iox_query/src/physical_optimizer/combine_chunks.rs b/iox_query/src/physical_optimizer/combine_chunks.rs
index 971f986e8c..65e822b982 100644
--- a/iox_query/src/physical_optimizer/combine_chunks.rs
+++ b/iox_query/src/physical_optimizer/combine_chunks.rs
@@ -83,7 +83,7 @@ mod tests {
scalar::ScalarValue,
};
- use crate::{physical_optimizer::test_util::OptimizationTest, test::TestChunk, QueryChunkMeta};
+ use crate::{physical_optimizer::test_util::OptimizationTest, test::TestChunk, QueryChunk};
use super::*;
diff --git a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs
index cc13bc06a0..23d5d0ea11 100644
--- a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs
+++ b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs
@@ -102,7 +102,7 @@ mod tests {
test_util::OptimizationTest,
},
test::TestChunk,
- QueryChunkMeta,
+ QueryChunk,
};
use super::*;
diff --git a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
index a177b1c144..eb692aa4f5 100644
--- a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
+++ b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
@@ -174,7 +174,7 @@ mod tests {
test_util::OptimizationTest,
},
test::TestChunk,
- QueryChunkMeta,
+ QueryChunk,
};
use super::*;
diff --git a/iox_query/src/physical_optimizer/dedup/partition_split.rs b/iox_query/src/physical_optimizer/dedup/partition_split.rs
index ea293c109a..bfb582ead3 100644
--- a/iox_query/src/physical_optimizer/dedup/partition_split.rs
+++ b/iox_query/src/physical_optimizer/dedup/partition_split.rs
@@ -111,12 +111,9 @@ impl PhysicalOptimizerRule for PartitionSplit {
#[cfg(test)]
mod tests {
- use crate::{
- physical_optimizer::{
- dedup::test_util::{chunk, dedup_plan},
- test_util::OptimizationTest,
- },
- QueryChunkMeta,
+ use crate::physical_optimizer::{
+ dedup::test_util::{chunk, dedup_plan},
+ test_util::OptimizationTest,
};
use super::*;
diff --git a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs
index dc7dc74972..617b499f71 100644
--- a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs
+++ b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs
@@ -64,7 +64,7 @@ mod tests {
dedup::test_util::{chunk, dedup_plan},
test_util::OptimizationTest,
},
- QueryChunkMeta,
+ QueryChunk,
};
use super::*;
diff --git a/iox_query/src/physical_optimizer/dedup/time_split.rs b/iox_query/src/physical_optimizer/dedup/time_split.rs
index cbf71670cc..93665cdabf 100644
--- a/iox_query/src/physical_optimizer/dedup/time_split.rs
+++ b/iox_query/src/physical_optimizer/dedup/time_split.rs
@@ -103,7 +103,7 @@ mod tests {
dedup::test_util::{chunk, dedup_plan},
test_util::OptimizationTest,
},
- QueryChunkMeta,
+ QueryChunk,
};
use super::*;
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index 7e7d30b21a..b0da731ffd 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -308,7 +308,6 @@ mod test {
use crate::{
exec::IOxSessionContext,
test::{format_execution_plan, TestChunk},
- QueryChunkMeta,
};
use datafusion::prelude::{col, lit};
diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs
index 95df24f790..05289e7522 100644
--- a/iox_query/src/provider/physical.rs
+++ b/iox_query/src/provider/physical.rs
@@ -354,7 +354,6 @@ mod tests {
use crate::{
chunk_order_field,
test::{format_execution_plan, TestChunk},
- QueryChunkMeta,
};
use super::*;
diff --git a/iox_query/src/pruning.rs b/iox_query/src/pruning.rs
index 5e1b5d0b22..d75195d0a0 100644
--- a/iox_query/src/pruning.rs
+++ b/iox_query/src/pruning.rs
@@ -1,6 +1,6 @@
//! Implementation of statistics based pruning
-use crate::{QueryChunk, QueryChunkMeta};
+use crate::QueryChunk;
use arrow::{
array::{ArrayRef, UInt64Array},
datatypes::{DataType, SchemaRef},
@@ -217,7 +217,7 @@ mod test {
use predicate::Predicate;
use schema::merge::SchemaMerger;
- use crate::{test::TestChunk, QueryChunk, QueryChunkMeta};
+ use crate::{test::TestChunk, QueryChunk};
use super::*;
diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs
index 0af3e9f5f6..70deabd311 100644
--- a/iox_query/src/test.rs
+++ b/iox_query/src/test.rs
@@ -8,8 +8,7 @@ use crate::{
ExecutionContextProvider, Executor, ExecutorType, IOxSessionContext,
},
pruning::prune_chunks,
- Predicate, QueryChunk, QueryChunkData, QueryChunkMeta, QueryCompletedToken, QueryNamespace,
- QueryText,
+ Predicate, QueryChunk, QueryChunkData, QueryCompletedToken, QueryNamespace, QueryText,
};
use arrow::array::{BooleanArray, Float64Array};
use arrow::datatypes::SchemaRef;
@@ -1091,6 +1090,36 @@ impl fmt::Display for TestChunk {
}
impl QueryChunk for TestChunk {
+ fn stats(&self) -> Arc<DataFusionStatistics> {
+ self.check_error().unwrap();
+
+ Arc::new(DataFusionStatistics {
+ num_rows: self.num_rows,
+ total_byte_size: None,
+ column_statistics: Some(
+ self.schema
+ .inner()
+ .fields()
+ .iter()
+ .map(|f| self.column_stats.get(f.name()).cloned().unwrap_or_default())
+ .collect(),
+ ),
+ is_exact: true,
+ })
+ }
+
+ fn schema(&self) -> &Schema {
+ &self.schema
+ }
+
+ fn partition_id(&self) -> PartitionId {
+ self.partition_id
+ }
+
+ fn sort_key(&self) -> Option<&SortKey> {
+ self.sort_key.as_ref()
+ }
+
fn id(&self) -> ChunkId {
self.id
}
@@ -1147,38 +1176,6 @@ impl QueryChunk for TestChunk {
}
}
-impl QueryChunkMeta for TestChunk {
- fn stats(&self) -> Arc<DataFusionStatistics> {
- self.check_error().unwrap();
-
- Arc::new(DataFusionStatistics {
- num_rows: self.num_rows,
- total_byte_size: None,
- column_statistics: Some(
- self.schema
- .inner()
- .fields()
- .iter()
- .map(|f| self.column_stats.get(f.name()).cloned().unwrap_or_default())
- .collect(),
- ),
- is_exact: true,
- })
- }
-
- fn schema(&self) -> &Schema {
- &self.schema
- }
-
- fn partition_id(&self) -> PartitionId {
- self.partition_id
- }
-
- fn sort_key(&self) -> Option<&SortKey> {
- self.sort_key.as_ref()
- }
-}
-
/// Return the raw data from the list of chunks
pub async fn raw_data(chunks: &[Arc<dyn QueryChunk>]) -> Vec<RecordBatch> {
let ctx = IOxSessionContext::with_testing();
diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs
index 81cc556a75..c49abde473 100644
--- a/querier/src/ingester/mod.rs
+++ b/querier/src/ingester/mod.rs
@@ -25,7 +25,7 @@ use ingester_query_grpc::{
use iox_query::{
exec::{stringset::StringSet, IOxSessionContext},
util::compute_timenanosecond_min_max,
- QueryChunk, QueryChunkData, QueryChunkMeta,
+ QueryChunk, QueryChunkData,
};
use iox_time::{Time, TimeProvider};
use metric::{DurationHistogram, Metric};
@@ -913,7 +913,7 @@ impl IngesterChunk {
}
}
-impl QueryChunkMeta for IngesterChunk {
+impl QueryChunk for IngesterChunk {
fn stats(&self) -> Arc<Statistics> {
Arc::clone(self.stats.as_ref().expect("chunk stats set"))
}
@@ -931,9 +931,7 @@ impl QueryChunkMeta for IngesterChunk {
// Data is not sorted
None
}
-}
-impl QueryChunk for IngesterChunk {
fn id(&self) -> ChunkId {
self.chunk_id
}
diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs
index c0c9a69835..a811561538 100644
--- a/querier/src/parquet/mod.rs
+++ b/querier/src/parquet/mod.rs
@@ -112,7 +112,7 @@ pub mod tests {
use datafusion_util::config::register_iox_object_store;
use iox_query::{
exec::{ExecutorType, IOxSessionContext},
- QueryChunk, QueryChunkMeta,
+ QueryChunk,
};
use iox_tests::{TestCatalog, TestParquetFileBuilder};
use metric::{Attributes, Observation, RawReporter};
diff --git a/querier/src/parquet/query_access.rs b/querier/src/parquet/query_access.rs
index 9d0a211f3c..251712fe55 100644
--- a/querier/src/parquet/query_access.rs
+++ b/querier/src/parquet/query_access.rs
@@ -3,13 +3,13 @@ use data_types::{ChunkId, ChunkOrder, PartitionId};
use datafusion::{error::DataFusionError, physical_plan::Statistics};
use iox_query::{
exec::{stringset::StringSet, IOxSessionContext},
- QueryChunk, QueryChunkData, QueryChunkMeta,
+ QueryChunk, QueryChunkData,
};
use predicate::Predicate;
use schema::{sort::SortKey, Projection, Schema};
use std::{any::Any, sync::Arc};
-impl QueryChunkMeta for QuerierParquetChunk {
+impl QueryChunk for QuerierParquetChunk {
fn stats(&self) -> Arc<Statistics> {
Arc::clone(&self.stats)
}
@@ -25,9 +25,7 @@ impl QueryChunkMeta for QuerierParquetChunk {
fn sort_key(&self) -> Option<&SortKey> {
self.meta().sort_key()
}
-}
-impl QueryChunk for QuerierParquetChunk {
fn id(&self) -> ChunkId {
self.meta().chunk_id
}
|
8f0da90d7699bb683413a5500f9b22cb3d73fff8
|
Dom Dwyer
|
2022-12-13 16:58:47
|
remove ref to PersistActor
|
Fix bad reflink to something that no longer exists.
| null |
docs: remove ref to PersistActor
Fix bad reflink to something that no longer exists.
|
diff --git a/ingester2/src/persist/handle.rs b/ingester2/src/persist/handle.rs
index 7e253cc426..eb993e0dac 100644
--- a/ingester2/src/persist/handle.rs
+++ b/ingester2/src/persist/handle.rs
@@ -109,9 +109,6 @@ pub(crate) struct PersistHandle {
impl PersistHandle {
/// Initialise a new persist actor & obtain the first handle.
- ///
- /// The caller should call [`PersistActor::run()`] in a separate
- /// thread / task to start the persistence executor.
pub(crate) fn new(
n_workers: usize,
worker_queue_depth: usize,
|
b5c0c9c16776309a7e245a96b63797f57ed574fd
|
Marco Neumann
|
2023-09-14 10:37:50
|
allow fallback to generic TS column range for chunk stats (#8724)
|
This will be useful for #8705.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: allow fallback to generic TS column range for chunk stats (#8724)
This will be useful for #8705.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/iox_query/src/chunk_statistics.rs b/iox_query/src/chunk_statistics.rs
index 486b2b6532..ca2ba74206 100644
--- a/iox_query/src/chunk_statistics.rs
+++ b/iox_query/src/chunk_statistics.rs
@@ -34,18 +34,29 @@ pub fn create_chunk_statistics(
for (t, field) in schema.iter() {
let stats = match t {
- InfluxColumnType::Timestamp => ColumnStatistics {
- null_count: Some(0),
- max_value: Some(ScalarValue::TimestampNanosecond(
- ts_min_max.map(|v| v.max),
- None,
- )),
- min_value: Some(ScalarValue::TimestampNanosecond(
- ts_min_max.map(|v| v.min),
- None,
- )),
- distinct_count: None,
- },
+ InfluxColumnType::Timestamp => {
+ // prefer explicitely given time range but fall back to column ranges
+ let (min_value, max_value) = match ts_min_max {
+ Some(ts_min_max) => (
+ Some(ScalarValue::TimestampNanosecond(Some(ts_min_max.min), None)),
+ Some(ScalarValue::TimestampNanosecond(Some(ts_min_max.max), None)),
+ ),
+ None => {
+ let range = ranges.get::<str>(field.name().as_ref());
+ (
+ range.map(|r| r.min_value.as_ref().clone()),
+ range.map(|r| r.max_value.as_ref().clone()),
+ )
+ }
+ };
+
+ ColumnStatistics {
+ null_count: Some(0),
+ max_value,
+ min_value,
+ distinct_count: None,
+ }
+ }
_ => ranges
.get::<str>(field.name().as_ref())
.map(|range| ColumnStatistics {
@@ -189,6 +200,42 @@ mod tests {
assert_eq!(actual, expected);
}
+ #[test]
+ fn test_create_chunk_statistics_ts_min_max_none_so_fallback_to_column_range() {
+ let schema = full_schema();
+ let row_count = 42u64;
+ let ranges = Arc::new(HashMap::from([(
+ Arc::from(TIME_COLUMN_NAME),
+ ColumnRange {
+ min_value: Arc::new(ScalarValue::TimestampNanosecond(Some(12), None)),
+ max_value: Arc::new(ScalarValue::TimestampNanosecond(Some(22), None)),
+ },
+ )]));
+
+ let actual = create_chunk_statistics(row_count, &schema, None, &ranges);
+ let expected = Statistics {
+ num_rows: Some(row_count as usize),
+ total_byte_size: None,
+ column_statistics: Some(vec![
+ ColumnStatistics::default(),
+ ColumnStatistics::default(),
+ ColumnStatistics::default(),
+ ColumnStatistics::default(),
+ ColumnStatistics::default(),
+ ColumnStatistics::default(),
+ ColumnStatistics::default(),
+ ColumnStatistics {
+ null_count: Some(0),
+ min_value: Some(ScalarValue::TimestampNanosecond(Some(12), None)),
+ max_value: Some(ScalarValue::TimestampNanosecond(Some(22), None)),
+ distinct_count: None,
+ },
+ ]),
+ is_exact: true,
+ };
+ assert_eq!(actual, expected);
+ }
+
fn full_schema() -> Schema {
SchemaBuilder::new()
.tag("tag1")
|
1d356959518c0000c7bb574869264dd76718ba42
|
Jamie Strandboge
|
2025-02-19 13:49:22
|
prefer pip over uv (#26039)
|
Official builds built with python-build-standalone currently only work
with pip. Until https://github.com/influxdata/influxdb/issues/26016 is
decided upon, prefer pip instead of uv.
| null |
fix: prefer pip over uv (#26039)
Official builds built with python-build-standalone currently only work
with pip. Until https://github.com/influxdata/influxdb/issues/26016 is
decided upon, prefer pip instead of uv.
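Editor's note: a simplified, self-contained sketch of the detection order after this change. The real `determine_package_manager()` in the diff below resolves the bundled python from `PYTHONHOME` for the pip check; here both probes are reduced to a hypothetical `--version` call, in the same style the real code uses for `uv`.

use std::process::Command;

/// Returns true if `cmd --version` runs and exits successfully. The pip
/// check is simplified here; the real code locates the bundled python
/// executable (honoring PYTHONHOME) before deciding pip is usable.
fn probe(cmd: &str) -> bool {
    Command::new(cmd)
        .arg("--version")
        .output()
        .map(|out| out.status.success())
        .unwrap_or(false)
}

fn main() {
    // Preference order after this change: pip (via the bundled python)
    // first, then uv, otherwise package installs are disabled.
    let manager = if probe("python3") {
        "pip"
    } else if probe("uv") {
        "uv"
    } else {
        "disabled"
    };
    println!("processing engine package manager: {manager}");
}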
|
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs
index a62113f88f..f3d5e8e4b9 100644
--- a/influxdb3/src/commands/serve.rs
+++ b/influxdb3/src/commands/serve.rs
@@ -665,14 +665,7 @@ pub(crate) fn setup_processing_engine_env_manager(
}
fn determine_package_manager() -> Arc<dyn PythonEnvironmentManager> {
- // Check for uv first (highest preference)
- if let Ok(output) = Command::new("uv").arg("--version").output() {
- if output.status.success() {
- return Arc::new(UVManager);
- }
- }
-
- // Check for pip second
+ // Check for pip (highest preference)
// XXX: put this somewhere common
let python_exe_bn = if cfg!(windows) {
"python.exe"
@@ -681,7 +674,7 @@ fn determine_package_manager() -> Arc<dyn PythonEnvironmentManager> {
};
let python_exe = if let Ok(v) = env::var("PYTHONHOME") {
// honor PYTHONHOME (set earlier for python standalone). python build
- // standalone has bin/python3 on OSX/Linx and python.exe on Windows
+ // standalone has bin/python3 on OSX/Linux and python.exe on Windows
let mut path = PathBuf::from(v);
if !cfg!(windows) {
path.push("bin");
@@ -701,6 +694,13 @@ fn determine_package_manager() -> Arc<dyn PythonEnvironmentManager> {
}
}
+ // Check for uv second (ie, prefer python standalone pip)
+ if let Ok(output) = Command::new("uv").arg("--version").output() {
+ if output.status.success() {
+ return Arc::new(UVManager);
+ }
+ }
+
// If neither is available, return DisabledManager
Arc::new(DisabledManager)
}
|
0297fe36516455bab9e45f9203bf3f59d259b5d8
|
Dom Dwyer
|
2023-07-03 17:23:59
|
less nesting in partition pruning logic
|
Improve readability by pulling the partition pruning logic into its own
function and clean up some minor bits.
| null |
refactor: less nesting in partition pruning logic
Improve readability by pulling the partition pruning logic into its own
function and clean up some minor bits.
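Editor's note: the shape of the refactor, reduced to a toy. The pruning decision moves into a helper that returns a bool, and the call site becomes a single guard clause with an early return. The types here are hypothetical placeholders for the ingester's predicate and buffered partition data.

// Hypothetical placeholders for the ingester's predicate and buffered data.
struct Predicate;
struct Data;

/// Stand-in for `keep_after_pruning_partition_key(...)` in the diff below:
/// returns true when the partition may still contain matching rows.
fn keep_after_pruning(_predicate: &Predicate, _data: &Data) -> bool {
    true
}

/// The refactored call site: one guard clause instead of a deeply nested
/// block, returning an empty response when pruning proves there is no match.
fn respond(predicate: Option<&Predicate>, data: &Data) -> Vec<&'static str> {
    if predicate
        .map(|p| !keep_after_pruning(p, data))
        .unwrap_or_default()
    {
        return vec![];
    }
    vec!["projected rows..."]
}

fn main() {
    assert_eq!(respond(Some(&Predicate), &Data), vec!["projected rows..."]);
    assert_eq!(respond(None, &Data), vec!["projected rows..."]);
}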
|
diff --git a/ingester/src/buffer_tree/table.rs b/ingester/src/buffer_tree/table.rs
index db1f70e9ba..6add3f7053 100644
--- a/ingester/src/buffer_tree/table.rs
+++ b/ingester/src/buffer_tree/table.rs
@@ -32,6 +32,7 @@ use crate::{
query::{
partition_response::PartitionResponse, response::PartitionStream, QueryError, QueryExec,
},
+ query_adaptor::QueryAdaptor,
};
/// Metadata from the catalog for a table
@@ -288,102 +289,28 @@ where
assert_eq!(id, data.partition_id());
let data = Arc::new(data);
- if let Some(predicate) = &predicate {
- // Filter using the partition key
- let column_ranges = Arc::new(
- build_column_values(&table_partition_template, partition_key.inner())
- .filter_map(|(col, val)| {
- let range = match val {
- ColumnValue::Identity(s) => {
- let s = Arc::new(ScalarValue::from(s.as_ref()));
- ColumnRange {
- min_value: Arc::clone(&s),
- max_value: s,
- }
- }
- ColumnValue::Prefix(p) => {
- if p.is_empty() {
- // full range => value is useless
- return None;
- }
-
- // If the partition only has a prefix of the tag value (it was truncated) then form a conservative
- // range:
- //
- //
- // # Minimum
- // Use the prefix itself.
- //
- // Note that the minimum is inclusive.
- //
- // All values in the partition are either:
- // - identical to the prefix, in which case they are included by the inclusive minimum
- // - have the form `"<prefix><s>"`, and it holds that `"<prefix><s>" > "<prefix>"` for all
- // strings `"<s>"`.
- //
- //
- // # Maximum
- // Use `"<prefix_excluding_last_char><char::max>"`.
- //
- // Note that the maximum is inclusive.
- //
- // All strings in this partition must be smaller than this constructed maximum, because
- // string comparison is front-to-back and the `"<prefix_excluding_last_char><char::max>" > "<prefix>"`.
-
- let min_value = Arc::new(ScalarValue::from(p.as_ref()));
-
- let mut chars = p.as_ref().chars().collect::<Vec<_>>();
- *chars
- .last_mut()
- .expect("checked that prefix is not empty") =
- std::char::MAX;
- let max_value = Arc::new(ScalarValue::from(
- chars.into_iter().collect::<String>().as_str(),
- ));
-
- ColumnRange {
- min_value,
- max_value,
- }
- }
- };
-
- Some((Arc::from(col), range))
- })
- .collect::<HashMap<_, _>>(),
- );
-
- let chunk_statistics = Arc::new(create_chunk_statistics(
- data.num_rows(),
- data.schema(),
- data.ts_min_max(),
- &column_ranges,
- ));
- let keep_after_pruning = prune_summaries(
- data.schema(),
- &[(chunk_statistics, data.schema().as_arrow())],
- predicate,
- )
- // Errors are logged by `iox_query` and sometimes fine, e.g. for not implemented DataFusion
- // features or upstream bugs. The querier uses the same strategy. Pruning is a mere
- // optimization and should not lead to crashes.
- .ok()
- .map(|vals| {
- vals.into_iter()
- .next()
- .expect("one chunk in, one chunk out")
+ // Potentially prune out this partition if the partition
+ // template & derived partition key can be used to match
+ // against the optional predicate.
+ if predicate
+ .as_ref()
+ .map(|p| {
+ !keep_after_pruning_partition_key(
+ &table_partition_template,
+ &partition_key,
+ p,
+ &data,
+ )
})
- .unwrap_or(true);
-
- if !keep_after_pruning {
- return PartitionResponse::new(
- vec![],
- id,
- hash_id,
- completed_persistence_count,
- );
- }
+ .unwrap_or_default()
+ {
+ return PartitionResponse::new(
+ vec![],
+ id,
+ hash_id,
+ completed_persistence_count,
+ );
}
// Project the data if necessary
@@ -408,6 +335,106 @@ where
}
}
+/// Return true if `data` contains one or more rows matching `predicate`,
+/// pruning based on the `partition_key` and `template`.
+///
+/// Returns false iff it can be proven that all of data does not match the
+/// predicate.
+fn keep_after_pruning_partition_key(
+ table_partition_template: &TablePartitionTemplateOverride,
+ partition_key: &PartitionKey,
+ predicate: &Predicate,
+ data: &QueryAdaptor,
+) -> bool {
+ // Construct a set of per-column min/max statistics based on the partition
+ // key values.
+ let column_ranges = Arc::new(
+ build_column_values(table_partition_template, partition_key.inner())
+ .filter_map(|(col, val)| {
+ let range = match val {
+ ColumnValue::Identity(s) => {
+ let s = Arc::new(ScalarValue::from(s.as_ref()));
+ ColumnRange {
+ min_value: Arc::clone(&s),
+ max_value: s,
+ }
+ }
+ ColumnValue::Prefix(p) if p.is_empty() => return None,
+ ColumnValue::Prefix(p) => {
+ // If the partition only has a prefix of the tag value
+ // (it was truncated) then form a conservative range:
+ //
+ // # Minimum
+ // Use the prefix itself.
+ //
+ // Note that the minimum is inclusive.
+ //
+ // All values in the partition are either:
+ //
+ // - identical to the prefix, in which case they are
+ // included by the inclusive minimum
+ //
+ // - have the form `"<prefix><s>"`, and it holds that
+ // `"<prefix><s>" > "<prefix>"` for all strings
+ // `"<s>"`.
+ //
+ // # Maximum
+ // Use `"<prefix_excluding_last_char><char::max>"`.
+ //
+ // Note that the maximum is inclusive.
+ //
+ // All strings in this partition must be smaller than
+ // this constructed maximum, because string comparison
+ // is front-to-back and the
+ // `"<prefix_excluding_last_char><char::max>" >
+ // "<prefix>"`.
+
+ let min_value = Arc::new(ScalarValue::from(p.as_ref()));
+
+ let mut chars = p.as_ref().chars().collect::<Vec<_>>();
+ *chars.last_mut().expect("checked that prefix is not empty") =
+ std::char::MAX;
+ let max_value = Arc::new(ScalarValue::from(
+ chars.into_iter().collect::<String>().as_str(),
+ ));
+
+ ColumnRange {
+ min_value,
+ max_value,
+ }
+ }
+ };
+
+ Some((Arc::from(col), range))
+ })
+ .collect::<HashMap<_, _>>(),
+ );
+
+ let chunk_statistics = Arc::new(create_chunk_statistics(
+ data.num_rows(),
+ data.schema(),
+ data.ts_min_max(),
+ &column_ranges,
+ ));
+
+ prune_summaries(
+ data.schema(),
+ &[(chunk_statistics, data.schema().as_arrow())],
+ predicate,
+ )
+ // Errors are logged by `iox_query` and sometimes fine, e.g. for not
+ // implemented DataFusion features or upstream bugs. The querier uses the
+ // same strategy. Pruning is a mere optimization and should not lead to
+ // crashes or unreadable data.
+ .ok()
+ .map(|vals| {
+ vals.into_iter()
+ .next()
+ .expect("one chunk in, one chunk out")
+ })
+ .unwrap_or(true)
+}
+
#[cfg(test)]
mod tests {
use std::sync::Arc;
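
To illustrate the conservative range construction described in the comments above, here is a minimal standalone Rust sketch (not part of the commit; `prefix_range` is a hypothetical helper, and plain Strings stand in for the ScalarValue-based ColumnRange):

// Minimal sketch: inclusive (min, max) bounds covering every tag value that
// starts with `prefix`; returns None for an empty prefix, mirroring the
// ColumnValue::Prefix handling above.
fn prefix_range(prefix: &str) -> Option<(String, String)> {
    if prefix.is_empty() {
        return None;
    }
    // Minimum: the prefix itself (inclusive).
    let min = prefix.to_string();
    // Maximum: the prefix with its last character replaced by char::MAX.
    let mut chars: Vec<char> = prefix.chars().collect();
    *chars.last_mut().expect("prefix is not empty") = std::char::MAX;
    let max: String = chars.into_iter().collect();
    Some((min, max))
}

fn main() {
    // Every value beginning with "ab" ("ab", "abc", "abzzz", ...) sorts inside the range.
    let (min, max) = prefix_range("ab").unwrap();
    assert_eq!(min, "ab");
    assert_eq!(max, format!("a{}", std::char::MAX));
    assert!(min.as_str() <= "abc" && "abc" <= max.as_str());
}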
|
a017d1d7f999818601c38873f692a93672cf56f4
|
Dom Dwyer
|
2023-08-03 18:02:33
|
simplify integration test setup
|
Remove the redundant async mutex (previously required, but I refactored
the code to make it unnecessary) and DRY the node setup.
| null |
test: simplify integration test setup
Remove the redundant async mutex (previously required, but I refactored
the code to make it unnecessary) and DRY the node setup.
|
diff --git a/router/src/gossip/mod.rs b/router/src/gossip/mod.rs
index d808ffbd6b..75f02a0960 100644
--- a/router/src/gossip/mod.rs
+++ b/router/src/gossip/mod.rs
@@ -80,7 +80,6 @@ mod tests {
};
use gossip::Dispatcher;
use test_helpers::timeout::FutureTimeout;
- use tokio::sync::Mutex;
use crate::namespace_cache::{MemoryNamespaceCache, NamespaceCache};
@@ -89,14 +88,14 @@ mod tests {
schema_change_observer::SchemaChangeObserver, traits::SchemaBroadcast,
};
- #[derive(Debug, Default)]
+ #[derive(Debug)]
struct GossipPipe {
- dispatcher: Mutex<Option<GossipMessageDispatcher>>,
+ dispatcher: GossipMessageDispatcher,
}
impl GossipPipe {
- async fn set_dispatcher(&self, dispatcher: GossipMessageDispatcher) {
- *self.dispatcher.lock().await = Some(dispatcher);
+ fn new(dispatcher: GossipMessageDispatcher) -> Self {
+ Self { dispatcher }
}
}
@@ -104,42 +103,42 @@ mod tests {
impl SchemaBroadcast for Arc<GossipPipe> {
async fn broadcast(&self, payload: Vec<u8>) {
self.dispatcher
- .lock()
- .await
- .as_mut()
- .unwrap()
.dispatch(payload.into())
.with_timeout_panic(Duration::from_secs(5))
.await;
}
}
- // Place a new namespace with a table and column into node A, and check it
- // becomes readable on node B.
- //
- // This is an integration test of the various schema gossip components.
- #[tokio::test]
- async fn test_integration() {
- // Two adaptors that will plug one "node" into the other.
- let gossip_a = Arc::new(GossipPipe::default());
- let gossip_b = Arc::new(GossipPipe::default());
-
+ /// Return a pair of "nodes" (independent caches) layered in the various
+ /// gossip components, with a mock gossip communication layer.
+ fn new_node_pair() -> (impl NamespaceCache, impl NamespaceCache) {
// Setup a cache for node A and wrap it in the gossip layer.
let node_a_cache = Arc::new(MemoryNamespaceCache::default());
let dispatcher_a = Arc::new(NamespaceSchemaGossip::new(Arc::clone(&node_a_cache)));
let dispatcher_a = GossipMessageDispatcher::new(dispatcher_a, 100);
- let node_a = SchemaChangeObserver::new(Arc::clone(&node_a_cache), Arc::clone(&gossip_b));
+ let gossip_a = Arc::new(GossipPipe::new(dispatcher_a));
// Setup a cache for node B.
-
let node_b_cache = Arc::new(MemoryNamespaceCache::default());
let dispatcher_b = Arc::new(NamespaceSchemaGossip::new(Arc::clone(&node_b_cache)));
let dispatcher_b = GossipMessageDispatcher::new(dispatcher_b, 100);
- let node_b = SchemaChangeObserver::new(Arc::clone(&node_b_cache), Arc::clone(&gossip_b));
+ let gossip_b = Arc::new(GossipPipe::new(dispatcher_b));
+
+ // Connect the two nodes via adaptors that will plug one "node" into the
+ // other.
+ let node_a = SchemaChangeObserver::new(Arc::clone(&node_a_cache), Arc::clone(&gossip_b));
+ let node_b = SchemaChangeObserver::new(Arc::clone(&node_b_cache), Arc::clone(&gossip_a));
+
+ (node_a, node_b)
+ }
- // Connect them together
- gossip_a.set_dispatcher(dispatcher_a).await;
- gossip_b.set_dispatcher(dispatcher_b).await;
+ // Place a new namespace with a table and column into node A, and check it
+ // becomes readable on node B.
+ //
+ // This is an integration test of the various schema gossip components.
+ #[tokio::test]
+ async fn test_integration() {
+ let (node_a, node_b) = new_node_pair();
// Fill in a table with a column to insert into A
let mut tables = BTreeMap::new();
@@ -195,26 +194,7 @@ mod tests {
// As above, but ensuring default partition templates propagate correctly.
#[tokio::test]
async fn test_integration_default_partition_templates() {
- // Two adaptors that will plug one "node" into the other.
- let gossip_a = Arc::new(GossipPipe::default());
- let gossip_b = Arc::new(GossipPipe::default());
-
- // Setup a cache for node A and wrap it in the gossip layer.
- let node_a_cache = Arc::new(MemoryNamespaceCache::default());
- let dispatcher_a = Arc::new(NamespaceSchemaGossip::new(Arc::clone(&node_a_cache)));
- let dispatcher_a = GossipMessageDispatcher::new(dispatcher_a, 100);
- let node_a = SchemaChangeObserver::new(Arc::clone(&node_a_cache), Arc::clone(&gossip_b));
-
- // Setup a cache for node B.
-
- let node_b_cache = Arc::new(MemoryNamespaceCache::default());
- let dispatcher_b = Arc::new(NamespaceSchemaGossip::new(Arc::clone(&node_b_cache)));
- let dispatcher_b = GossipMessageDispatcher::new(dispatcher_b, 100);
- let node_b = SchemaChangeObserver::new(Arc::clone(&node_b_cache), Arc::clone(&gossip_b));
-
- // Connect them together
- gossip_a.set_dispatcher(dispatcher_a).await;
- gossip_b.set_dispatcher(dispatcher_b).await;
+ let (node_a, node_b) = new_node_pair();
// Fill in a table with a column to insert into A
let mut tables = BTreeMap::new();
|
2f2fcb6f05375d7565e0b17c17ad379a93e69dc6
|
Martin Hilton
|
2023-07-20 07:19:02
|
make DERIVATIVE a user-defined window function (#8265)
|
Now that user-defined window functions are available, change the
DERIVATIVE and NON_NEGATIVE_DERIVATIVE function implementations to
use user-defined windows functions. This should improve performance
by allowing the entire window to be processed in one go, rather
than processing one row at a time.
The implementation is also moved out of the planner module alongside
the other user-defined window functions.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor(influxql): make DERIVATIVE a user-defined window function (#8265)
Now that user-defined window functions are available, change the
DERIVATIVE and NON_NEGATIVE_DERIVATIVE function implementations to
use user-defined windows functions. This should improve performance
by allowing the entire window to be processed in one go, rather
than processing one row at a time.
The implementation is also moved out of the planner module alongside
the other user-defined window functions.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index 8936106e62..ab9e78b006 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -9,7 +9,7 @@ use crate::plan::planner::select::{
};
use crate::plan::planner_time_range_expression::time_range_to_df_expr;
use crate::plan::rewriter::{find_table_names, rewrite_statement, ProjectionType};
-use crate::plan::udaf::{derivative_udf, non_negative_derivative_udf, MOVING_AVERAGE};
+use crate::plan::udaf::MOVING_AVERAGE;
use crate::plan::udf::{
cumulative_sum, derivative, difference, find_window_udfs, moving_average,
non_negative_derivative, non_negative_difference,
@@ -17,7 +17,10 @@ use crate::plan::udf::{
use crate::plan::util::{binary_operator_to_df_operator, rebase_expr, IQLSchema};
use crate::plan::var_ref::var_ref_data_type_to_data_type;
use crate::plan::{planner_rewrite_expression, udf, util_copy};
-use crate::window::{CUMULATIVE_SUM, DIFFERENCE, NON_NEGATIVE_DIFFERENCE, PERCENT_ROW_NUMBER};
+use crate::window::{
+ CUMULATIVE_SUM, DERIVATIVE, DIFFERENCE, NON_NEGATIVE_DERIVATIVE, NON_NEGATIVE_DIFFERENCE,
+ PERCENT_ROW_NUMBER,
+};
use arrow::array::{StringBuilder, StringDictionaryBuilder};
use arrow::datatypes::{DataType, Field as ArrowField, Int32Type, Schema as ArrowSchema};
use arrow::record_batch::RecordBatch;
@@ -1444,17 +1447,17 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
return error::internal(format!("udf_to_expr: unexpected expression: {e}"))
};
- fn derivative_unit(ctx: &Context<'_>, args: &Vec<Expr>) -> Result<i64> {
+ fn derivative_unit(ctx: &Context<'_>, args: &Vec<Expr>) -> Result<ScalarValue> {
if args.len() > 1 {
- if let Expr::Literal(ScalarValue::IntervalMonthDayNano(Some(v))) = args[1] {
- Ok(v as i64)
+ if let Expr::Literal(v) = &args[1] {
+ Ok(v.clone())
} else {
error::internal(format!("udf_to_expr: unexpected expression: {}", args[1]))
}
} else if let Some(interval) = ctx.interval {
- Ok(interval.duration)
+ Ok(ScalarValue::new_interval_mdn(0, 0, interval.duration))
} else {
- Ok(1000000000) // 1s
+ Ok(ScalarValue::new_interval_mdn(0, 0, 1_000_000_000)) // 1s
}
}
@@ -1498,31 +1501,35 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
.alias(alias))
}
Some(udf::WindowFunction::Derivative) => Ok(Expr::WindowFunction(WindowFunction {
- fun: window_function::WindowFunction::AggregateUDF(
- derivative_udf(derivative_unit(ctx, &args)?).into(),
- ),
- args: vec!["time".as_expr(), args[0].clone()],
+ fun: DERIVATIVE.clone(),
+ args: vec![
+ args[0].clone(),
+ lit(derivative_unit(ctx, &args)?),
+ "time".as_expr(),
+ ],
partition_by,
order_by,
window_frame: WindowFrame {
units: WindowFrameUnits::Rows,
start_bound: WindowFrameBound::Preceding(ScalarValue::Null),
- end_bound: WindowFrameBound::CurrentRow,
+ end_bound: WindowFrameBound::Following(ScalarValue::Null),
},
})
.alias(alias)),
Some(udf::WindowFunction::NonNegativeDerivative) => {
Ok(Expr::WindowFunction(WindowFunction {
- fun: window_function::WindowFunction::AggregateUDF(
- non_negative_derivative_udf(derivative_unit(ctx, &args)?).into(),
- ),
- args: vec!["time".as_expr(), args[0].clone()],
+ fun: NON_NEGATIVE_DERIVATIVE.clone(),
+ args: vec![
+ args[0].clone(),
+ lit(derivative_unit(ctx, &args)?),
+ "time".as_expr(),
+ ],
partition_by,
order_by,
window_frame: WindowFrame {
units: WindowFrameUnits::Rows,
start_bound: WindowFrameBound::Preceding(ScalarValue::Null),
- end_bound: WindowFrameBound::CurrentRow,
+ end_bound: WindowFrameBound::Following(ScalarValue::Null),
},
})
.alias(alias))
@@ -3969,7 +3976,7 @@ mod test {
Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, derivative [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), derivative:Float64;N]
Filter: NOT derivative IS NULL [time:Timestamp(Nanosecond, None), derivative:Float64;N]
Projection: cpu.time AS time, derivative(cpu.usage_idle) AS derivative [time:Timestamp(Nanosecond, None), derivative:Float64;N]
- WindowAggr: windowExpr=[[AggregateUDF { name: "derivative(unit: 1000000000)", signature: Signature { type_signature: OneOf([Exact([Timestamp(Nanosecond, None), Int64]), Exact([Timestamp(Nanosecond, None), UInt64]), Exact([Timestamp(Nanosecond, None), Float64])]), volatility: Immutable }, fun: "<FUNC>" }(cpu.time, cpu.usage_idle) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS derivative(cpu.usage_idle)]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, derivative(cpu.usage_idle):Float64;N]
+ WindowAggr: windowExpr=[[derivative(cpu.usage_idle, IntervalMonthDayNano("1000000000"), cpu.time) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING AS derivative(cpu.usage_idle)]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, derivative(cpu.usage_idle):Float64;N]
TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
"###);
@@ -3979,7 +3986,7 @@ mod test {
Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, derivative [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, derivative:Float64;N]
Filter: NOT derivative IS NULL [time:Timestamp(Nanosecond, None);N, derivative:Float64;N]
Projection: time, derivative(AVG(cpu.usage_idle)) AS derivative [time:Timestamp(Nanosecond, None);N, derivative:Float64;N]
- WindowAggr: windowExpr=[[AggregateUDF { name: "derivative(unit: 10000000000)", signature: Signature { type_signature: OneOf([Exact([Timestamp(Nanosecond, None), Int64]), Exact([Timestamp(Nanosecond, None), UInt64]), Exact([Timestamp(Nanosecond, None), Float64])]), volatility: Immutable }, fun: "<FUNC>" }(time, AVG(cpu.usage_idle)) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS derivative(AVG(cpu.usage_idle))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, derivative(AVG(cpu.usage_idle)):Float64;N]
+ WindowAggr: windowExpr=[[derivative(AVG(cpu.usage_idle), IntervalMonthDayNano("10000000000"), time) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING AS derivative(AVG(cpu.usage_idle))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, derivative(AVG(cpu.usage_idle)):Float64;N]
GapFill: groupBy=[time], aggr=[[AVG(cpu.usage_idle)]], time_column=time, stride=IntervalMonthDayNano("10000000000"), range=Unbounded..Included(Literal(TimestampNanosecond(1672531200000000000, None))) [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("10000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[AVG(cpu.usage_idle)]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
@@ -3995,7 +4002,7 @@ mod test {
Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, non_negative_derivative [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), non_negative_derivative:Float64;N]
Filter: NOT non_negative_derivative IS NULL [time:Timestamp(Nanosecond, None), non_negative_derivative:Float64;N]
Projection: cpu.time AS time, non_negative_derivative(cpu.usage_idle) AS non_negative_derivative [time:Timestamp(Nanosecond, None), non_negative_derivative:Float64;N]
- WindowAggr: windowExpr=[[AggregateUDF { name: "non_negative_derivative(unit: 1000000000)", signature: Signature { type_signature: OneOf([Exact([Timestamp(Nanosecond, None), Int64]), Exact([Timestamp(Nanosecond, None), UInt64]), Exact([Timestamp(Nanosecond, None), Float64])]), volatility: Immutable }, fun: "<FUNC>" }(cpu.time, cpu.usage_idle) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS non_negative_derivative(cpu.usage_idle)]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, non_negative_derivative(cpu.usage_idle):Float64;N]
+ WindowAggr: windowExpr=[[non_negative_derivative(cpu.usage_idle, IntervalMonthDayNano("1000000000"), cpu.time) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING AS non_negative_derivative(cpu.usage_idle)]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, non_negative_derivative(cpu.usage_idle):Float64;N]
TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
"###);
@@ -4005,7 +4012,7 @@ mod test {
Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, non_negative_derivative [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, non_negative_derivative:Float64;N]
Filter: NOT non_negative_derivative IS NULL [time:Timestamp(Nanosecond, None);N, non_negative_derivative:Float64;N]
Projection: time, non_negative_derivative(AVG(cpu.usage_idle)) AS non_negative_derivative [time:Timestamp(Nanosecond, None);N, non_negative_derivative:Float64;N]
- WindowAggr: windowExpr=[[AggregateUDF { name: "non_negative_derivative(unit: 10000000000)", signature: Signature { type_signature: OneOf([Exact([Timestamp(Nanosecond, None), Int64]), Exact([Timestamp(Nanosecond, None), UInt64]), Exact([Timestamp(Nanosecond, None), Float64])]), volatility: Immutable }, fun: "<FUNC>" }(time, AVG(cpu.usage_idle)) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS non_negative_derivative(AVG(cpu.usage_idle))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, non_negative_derivative(AVG(cpu.usage_idle)):Float64;N]
+ WindowAggr: windowExpr=[[non_negative_derivative(AVG(cpu.usage_idle), IntervalMonthDayNano("10000000000"), time) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING AS non_negative_derivative(AVG(cpu.usage_idle))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, non_negative_derivative(AVG(cpu.usage_idle)):Float64;N]
GapFill: groupBy=[time], aggr=[[AVG(cpu.usage_idle)]], time_column=time, stride=IntervalMonthDayNano("10000000000"), range=Unbounded..Included(Literal(TimestampNanosecond(1672531200000000000, None))) [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("10000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[AVG(cpu.usage_idle)]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
diff --git a/iox_query_influxql/src/plan/udaf.rs b/iox_query_influxql/src/plan/udaf.rs
index a83b1c5226..3b0eaed6a6 100644
--- a/iox_query_influxql/src/plan/udaf.rs
+++ b/iox_query_influxql/src/plan/udaf.rs
@@ -1,13 +1,12 @@
use crate::{error, NUMERICS};
use arrow::array::{Array, ArrayRef, Int64Array};
-use arrow::datatypes::{DataType, TimeUnit};
+use arrow::datatypes::DataType;
use datafusion::common::{downcast_value, DataFusionError, Result, ScalarValue};
use datafusion::logical_expr::{
Accumulator, AccumulatorFactoryFunction, AggregateUDF, ReturnTypeFunction, Signature,
StateTypeFunction, TypeSignature, Volatility,
};
use once_cell::sync::Lazy;
-use std::mem::replace;
use std::sync::Arc;
/// Name of the `MOVING_AVERAGE` user-defined aggregate function.
@@ -148,222 +147,3 @@ impl Accumulator for AvgNAccumulator {
- std::mem::size_of_val(&self.data_type)
}
}
-
-/// NonNegative is a wrapper around an Accumulator that transposes
-/// negative value to be NULL.
-#[derive(Debug)]
-struct NonNegative<T> {
- acc: T,
-}
-
-impl<T> NonNegative<T> {
- fn new(acc: T) -> Self {
- Self { acc }
- }
-}
-
-impl<T: Accumulator> Accumulator for NonNegative<T> {
- fn state(&self) -> Result<Vec<ScalarValue>> {
- self.acc.state()
- }
-
- fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
- self.acc.update_batch(values)
- }
-
- fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
- self.acc.merge_batch(states)
- }
-
- fn evaluate(&self) -> Result<ScalarValue> {
- Ok(match self.acc.evaluate()? {
- ScalarValue::Float64(Some(v)) if v < 0.0 => ScalarValue::Float64(None),
- ScalarValue::Int64(Some(v)) if v < 0 => ScalarValue::Int64(None),
- v => v,
- })
- }
-
- fn size(&self) -> usize {
- self.acc.size()
- }
-}
-
-/// Name of the `DERIVATIVE` user-defined aggregate function.
-pub(crate) const DERIVATIVE_NAME: &str = "derivative";
-
-pub(crate) fn derivative_udf(unit: i64) -> AggregateUDF {
- let return_type: ReturnTypeFunction = Arc::new(|_| Ok(Arc::new(DataType::Float64)));
- let accumulator: AccumulatorFactoryFunction =
- Arc::new(move |_| Ok(Box::new(DerivativeAccumulator::new(unit))));
- let state_type: StateTypeFunction = Arc::new(|_| Ok(Arc::new(vec![])));
- let sig = Signature::one_of(
- NUMERICS
- .iter()
- .map(|dt| {
- TypeSignature::Exact(vec![
- DataType::Timestamp(TimeUnit::Nanosecond, None),
- dt.clone(),
- ])
- })
- .collect(),
- Volatility::Immutable,
- );
- AggregateUDF::new(
- format!("{DERIVATIVE_NAME}(unit: {unit})").as_str(),
- &sig,
- &return_type,
- &accumulator,
- // State shouldn't be called, so no schema to report
- &state_type,
- )
-}
-
-/// Name of the `NON_NEGATIVE_DERIVATIVE` user-defined aggregate function.
-pub(crate) const NON_NEGATIVE_DERIVATIVE_NAME: &str = "non_negative_derivative";
-
-pub(crate) fn non_negative_derivative_udf(unit: i64) -> AggregateUDF {
- let return_type: ReturnTypeFunction = Arc::new(|_| Ok(Arc::new(DataType::Float64)));
- let accumulator: AccumulatorFactoryFunction = Arc::new(move |_| {
- Ok(Box::new(NonNegative::<_>::new(DerivativeAccumulator::new(
- unit,
- ))))
- });
- let state_type: StateTypeFunction = Arc::new(|_| Ok(Arc::new(vec![])));
- let sig = Signature::one_of(
- NUMERICS
- .iter()
- .map(|dt| {
- TypeSignature::Exact(vec![
- DataType::Timestamp(TimeUnit::Nanosecond, None),
- dt.clone(),
- ])
- })
- .collect(),
- Volatility::Immutable,
- );
- AggregateUDF::new(
- format!("{NON_NEGATIVE_DERIVATIVE_NAME}(unit: {unit})").as_str(),
- &sig,
- &return_type,
- &accumulator,
- // State shouldn't be called, so no schema to report
- &state_type,
- )
-}
-
-#[derive(Debug)]
-struct DerivativeAccumulator {
- unit: i64,
- prev: Option<Point>,
- curr: Option<Point>,
-}
-
-impl DerivativeAccumulator {
- fn new(unit: i64) -> Self {
- Self {
- unit,
- prev: None,
- curr: None,
- }
- }
-}
-
-impl Accumulator for DerivativeAccumulator {
- /// `state` is only called when used as an aggregate function. It can be
- /// can safely left unimplemented, as this accumulator is only used as a window aggregate.
- ///
- /// See: <https://docs.rs/datafusion/latest/datafusion/physical_plan/trait.Accumulator.html#tymethod.state>
- fn state(&self) -> Result<Vec<ScalarValue>> {
- error::internal("unexpected call to DerivativeAccumulator::state")
- }
-
- fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
- if values.is_empty() {
- return Ok(());
- }
-
- let times = &values[0];
- let arr = &values[1];
- for index in 0..arr.len() {
- let time = match ScalarValue::try_from_array(times, index)? {
- ScalarValue::TimestampNanosecond(Some(ts), _) => ts,
- v => {
- return Err(DataFusionError::Internal(format!(
- "invalid time value: {}",
- v
- )))
- }
- };
- let curr = Point::new(time, ScalarValue::try_from_array(arr, index)?);
- let prev = replace(&mut self.curr, curr);
-
- // don't replace the previous value if the current value has the same timestamp.
- if self.prev.is_none()
- || prev
- .as_ref()
- .is_some_and(|prev| prev.time > self.prev.as_ref().unwrap().time)
- {
- self.prev = prev
- }
- }
- Ok(())
- }
-
- /// `merge_batch` is only called when used as an aggregate function. It can be
- /// can safely left unimplemented, as this accumulator is only used as a window aggregate.
- ///
- /// See: <https://docs.rs/datafusion/latest/datafusion/physical_plan/trait.Accumulator.html#tymethod.state>
- fn merge_batch(&mut self, _states: &[ArrayRef]) -> Result<()> {
- error::internal("unexpected call to DerivativeAccumulator::merge_batch")
- }
-
- fn evaluate(&self) -> Result<ScalarValue> {
- Ok(ScalarValue::Float64(
- self.curr
- .as_ref()
- .and_then(|c| c.derivative(self.prev.as_ref(), self.unit)),
- ))
- }
-
- fn size(&self) -> usize {
- std::mem::size_of_val(self)
- }
-}
-
-#[derive(Debug)]
-struct Point {
- time: i64,
- value: ScalarValue,
-}
-
-impl Point {
- fn new(time: i64, value: ScalarValue) -> Option<Self> {
- if value.is_null() {
- None
- } else {
- Some(Self { time, value })
- }
- }
-
- fn value_as_f64(&self) -> f64 {
- match self.value {
- ScalarValue::Int64(Some(v)) => v as f64,
- ScalarValue::Float64(Some(v)) => v,
- ScalarValue::UInt64(Some(v)) => v as f64,
- _ => panic!("invalid point {:?}", self),
- }
- }
-
- fn derivative(&self, prev: Option<&Self>, unit: i64) -> Option<f64> {
- prev.and_then(|prev| {
- let diff = self.value_as_f64() - prev.value_as_f64();
- let elapsed = match self.time - prev.time {
- // if the time hasn't changed then it is a NULL.
- 0 => return None,
- v => v,
- } as f64;
- let devisor = elapsed / (unit as f64);
- Some(diff / devisor)
- })
- }
-}
diff --git a/iox_query_influxql/src/plan/udf.rs b/iox_query_influxql/src/plan/udf.rs
index fe9fa7196e..e61e65af9b 100644
--- a/iox_query_influxql/src/plan/udf.rs
+++ b/iox_query_influxql/src/plan/udf.rs
@@ -7,7 +7,7 @@
use crate::plan::util_copy::find_exprs_in_exprs;
use crate::{error, NUMERICS};
-use arrow::datatypes::DataType;
+use arrow::datatypes::{DataType, TimeUnit};
use datafusion::logical_expr::{
Expr, ReturnTypeFunction, ScalarFunctionImplementation, ScalarUDF, Signature, TypeSignature,
Volatility,
@@ -131,13 +131,21 @@ pub(crate) fn derivative(args: Vec<Expr>) -> Expr {
/// Definition of the `DERIVATIVE` function.
static DERIVATIVE: Lazy<Arc<ScalarUDF>> = Lazy::new(|| {
- let return_type_fn: ReturnTypeFunction = Arc::new(|args| Ok(Arc::new(args[0].clone())));
+ let return_type_fn: ReturnTypeFunction = Arc::new(|_| Ok(Arc::new(DataType::Float64)));
Arc::new(ScalarUDF::new(
DERIVATIVE_UDF_NAME,
&Signature::one_of(
NUMERICS
.iter()
- .map(|dt| TypeSignature::Exact(vec![dt.clone()]))
+ .flat_map(|dt| {
+ vec![
+ TypeSignature::Exact(vec![dt.clone()]),
+ TypeSignature::Exact(vec![
+ dt.clone(),
+ DataType::Duration(TimeUnit::Nanosecond),
+ ]),
+ ]
+ })
.collect(),
Volatility::Immutable,
),
@@ -155,13 +163,21 @@ pub(crate) fn non_negative_derivative(args: Vec<Expr>) -> Expr {
/// Definition of the `NON_NEGATIVE_DERIVATIVE` function.
static NON_NEGATIVE_DERIVATIVE: Lazy<Arc<ScalarUDF>> = Lazy::new(|| {
- let return_type_fn: ReturnTypeFunction = Arc::new(|args| Ok(Arc::new(args[0].clone())));
+ let return_type_fn: ReturnTypeFunction = Arc::new(|_| Ok(Arc::new(DataType::Float64)));
Arc::new(ScalarUDF::new(
NON_NEGATIVE_DERIVATIVE_UDF_NAME,
&Signature::one_of(
NUMERICS
.iter()
- .map(|dt| TypeSignature::Exact(vec![dt.clone()]))
+ .flat_map(|dt| {
+ vec![
+ TypeSignature::Exact(vec![dt.clone()]),
+ TypeSignature::Exact(vec![
+ dt.clone(),
+ DataType::Duration(TimeUnit::Nanosecond),
+ ]),
+ ]
+ })
.collect(),
Volatility::Immutable,
),
diff --git a/iox_query_influxql/src/window.rs b/iox_query_influxql/src/window.rs
index 073c889d1f..9e04438780 100644
--- a/iox_query_influxql/src/window.rs
+++ b/iox_query_influxql/src/window.rs
@@ -7,6 +7,7 @@ use once_cell::sync::Lazy;
use std::sync::Arc;
mod cumulative_sum;
+mod derivative;
mod difference;
mod non_negative;
mod percent_row_number;
@@ -25,6 +26,20 @@ pub(crate) static CUMULATIVE_SUM: Lazy<WindowFunction> = Lazy::new(|| {
)))
});
+/// Definition of the `DERIVATIVE` user-defined window function.
+pub(crate) static DERIVATIVE: Lazy<WindowFunction> = Lazy::new(|| {
+ let return_type: ReturnTypeFunction = Arc::new(derivative::return_type);
+ let partition_evaluator_factory: PartitionEvaluatorFactory =
+ Arc::new(derivative::partition_evaluator_factory);
+
+ WindowFunction::WindowUDF(Arc::new(WindowUDF::new(
+ derivative::NAME,
+ &derivative::SIGNATURE,
+ &return_type,
+ &partition_evaluator_factory,
+ )))
+});
+
/// Definition of the `DIFFERENCE` user-defined window function.
pub(crate) static DIFFERENCE: Lazy<WindowFunction> = Lazy::new(|| {
let return_type: ReturnTypeFunction = Arc::new(difference::return_type);
@@ -39,6 +54,25 @@ pub(crate) static DIFFERENCE: Lazy<WindowFunction> = Lazy::new(|| {
)))
});
+const NON_NEGATIVE_DERIVATIVE_NAME: &str = "non_negative_derivative";
+
+/// Definition of the `NON_NEGATIVE_DERIVATIVE` user-defined window function.
+pub(crate) static NON_NEGATIVE_DERIVATIVE: Lazy<WindowFunction> = Lazy::new(|| {
+ let return_type: ReturnTypeFunction = Arc::new(derivative::return_type);
+ let partition_evaluator_factory: PartitionEvaluatorFactory = Arc::new(|| {
+ Ok(non_negative::wrapper(
+ derivative::partition_evaluator_factory()?,
+ ))
+ });
+
+ WindowFunction::WindowUDF(Arc::new(WindowUDF::new(
+ NON_NEGATIVE_DERIVATIVE_NAME,
+ &derivative::SIGNATURE,
+ &return_type,
+ &partition_evaluator_factory,
+ )))
+});
+
const NON_NEGATIVE_DIFFERENCE_NAME: &str = "non_negative_difference";
/// Definition of the `NON_NEGATIVE_DIFFERENCE` user-defined window function.
diff --git a/iox_query_influxql/src/window/derivative.rs b/iox_query_influxql/src/window/derivative.rs
new file mode 100644
index 0000000000..4a486275c5
--- /dev/null
+++ b/iox_query_influxql/src/window/derivative.rs
@@ -0,0 +1,125 @@
+use crate::{error, NUMERICS};
+use arrow::array::{Array, ArrayRef};
+use arrow::datatypes::{DataType, TimeUnit};
+use datafusion::common::{Result, ScalarValue};
+use datafusion::logical_expr::{PartitionEvaluator, Signature, TypeSignature, Volatility};
+use once_cell::sync::Lazy;
+use std::borrow::Borrow;
+use std::sync::Arc;
+
+/// The name of the derivative window function.
+pub(super) const NAME: &str = "derivative";
+
+/// Valid signatures for the derivative window function.
+pub(super) static SIGNATURE: Lazy<Signature> = Lazy::new(|| {
+ Signature::one_of(
+ NUMERICS
+ .iter()
+ .map(|dt| {
+ TypeSignature::Exact(vec![
+ dt.clone(),
+ DataType::Duration(TimeUnit::Nanosecond),
+ DataType::Timestamp(TimeUnit::Nanosecond, None),
+ ])
+ })
+ .collect(),
+ Volatility::Immutable,
+ )
+});
+
+/// Calculate the return type given the function signature.
+pub(super) fn return_type(_: &[DataType]) -> Result<Arc<DataType>> {
+ Ok(Arc::new(DataType::Float64))
+}
+
+/// Create a new partition_evaluator_factory.
+pub(super) fn partition_evaluator_factory() -> Result<Box<dyn PartitionEvaluator>> {
+ Ok(Box::new(DifferencePartitionEvaluator {}))
+}
+
+/// PartitionEvaluator which returns the derivative between input values,
+/// in the provided units.
+#[derive(Debug)]
+struct DifferencePartitionEvaluator {}
+
+impl PartitionEvaluator for DifferencePartitionEvaluator {
+ fn evaluate_all(&mut self, values: &[ArrayRef], _num_rows: usize) -> Result<Arc<dyn Array>> {
+ assert_eq!(values.len(), 3);
+
+ let array = Arc::clone(&values[0]);
+ let times = Arc::clone(&values[2]);
+
+ // The second element of the values array is the second argument to
+ // the 'derivative' function. This specifies the unit duration for the
+ // derivation to use.
+ //
+ // INVARIANT:
+ // The planner guarantees that the second argument is always a duration
+ // literal.
+ let unit = ScalarValue::try_from_array(&values[1], 0)?;
+
+ let mut idx: usize = 0;
+ let mut last: ScalarValue = array.data_type().try_into()?;
+ let mut last_time: ScalarValue = times.data_type().try_into()?;
+ let mut derivative: Vec<ScalarValue> = vec![];
+
+ while idx < array.len() {
+ last = ScalarValue::try_from_array(&array, idx)?;
+ last_time = ScalarValue::try_from_array(&times, idx)?;
+ derivative.push(ScalarValue::Float64(None));
+ idx += 1;
+ if !last.is_null() {
+ break;
+ }
+ }
+ while idx < array.len() {
+ let v = ScalarValue::try_from_array(&array, idx)?;
+ let t = ScalarValue::try_from_array(&times, idx)?;
+ if v.is_null() {
+ derivative.push(ScalarValue::Float64(None));
+ } else {
+ derivative.push(ScalarValue::Float64(Some(
+ delta(&v, &last)? / delta_time(&t, &last_time, &unit)?,
+ )));
+ last = v.clone();
+ last_time = t.clone();
+ }
+ idx += 1;
+ }
+ Ok(Arc::new(ScalarValue::iter_to_array(derivative)?))
+ }
+
+ fn uses_window_frame(&self) -> bool {
+ false
+ }
+
+ fn include_rank(&self) -> bool {
+ false
+ }
+}
+
+fn delta(curr: &ScalarValue, prev: &ScalarValue) -> Result<f64> {
+ match (curr.borrow(), prev.borrow()) {
+ (ScalarValue::Float64(Some(curr)), ScalarValue::Float64(Some(prev))) => Ok(*curr - *prev),
+ (ScalarValue::Int64(Some(curr)), ScalarValue::Int64(Some(prev))) => {
+ Ok(*curr as f64 - *prev as f64)
+ }
+ (ScalarValue::UInt64(Some(curr)), ScalarValue::UInt64(Some(prev))) => {
+ Ok(*curr as f64 - *prev as f64)
+ }
+ _ => error::internal("derivative attempted on unsupported values"),
+ }
+}
+
+fn delta_time(curr: &ScalarValue, prev: &ScalarValue, unit: &ScalarValue) -> Result<f64> {
+ if let (
+ ScalarValue::TimestampNanosecond(Some(curr), _),
+ ScalarValue::TimestampNanosecond(Some(prev), _),
+ ScalarValue::IntervalMonthDayNano(Some(unit)),
+ ) = (curr, prev, unit)
+ {
+ Ok((*curr as f64 - *prev as f64) / *unit as f64)
+ } else {
+ error::internal("derivative attempted on unsupported values")
+ }
+}
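
To illustrate the single-pass, whole-window computation this commit moves into a window function, here is a minimal standalone Rust sketch over plain slices (not part of the commit; `derivative` and `unit_nanos` are hypothetical names, Option<f64> values and i64 nanosecond timestamps stand in for the Arrow arrays, and the repeated-timestamp edge case is ignored):

fn derivative(values: &[Option<f64>], times: &[i64], unit_nanos: i64) -> Vec<Option<f64>> {
    let mut out = Vec::with_capacity(values.len());
    let mut last: Option<(i64, f64)> = None;
    for (&t, v) in times.iter().zip(values) {
        match (*v, last) {
            // A value and a previous value: emit (dv / dt) scaled to the unit.
            (Some(v), Some((last_t, last_v))) => {
                let elapsed = (t - last_t) as f64 / unit_nanos as f64;
                out.push(Some((v - last_v) / elapsed));
                last = Some((t, v));
            }
            // First non-null value: nothing to difference against yet.
            (Some(v), None) => {
                out.push(None);
                last = Some((t, v));
            }
            // Nulls produce nulls and do not advance the "previous" point.
            (None, _) => out.push(None),
        }
    }
    out
}

fn main() {
    let times = [0, 1_000_000_000, 2_000_000_000];
    let values = [Some(1.0), Some(3.0), Some(5.0)];
    // With a 1s unit the series rises by 2.0 per second.
    assert_eq!(
        derivative(&values, &times, 1_000_000_000),
        vec![None, Some(2.0), Some(2.0)]
    );
}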
|
f11484271102d963e7c5529f5a68c0b733604964
|
Stuart Carnie
|
2023-06-06 16:33:01
|
Push outer query time-range to subqueries
|
Added additional end-to-end tests to validate time-range behaviour
| null |
feat: Push outer query time-range to subqueries
Added additional end-to-end tests to validate time-range behaviour
|
diff --git a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql
index ea5605174e..31d6f39da7 100644
--- a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql
+++ b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql
@@ -628,3 +628,14 @@ SELECT last(sum_idle), last(sum_system) FROM (SELECT sum(usage_idle) AS sum_idle
-- A possible fix is to introduce a column that assigned a unique number to
-- each subquery to ensure the results are deterministic.
-- SELECT last(*) FROM (SELECT sum(usage_idle) AS sum_idle FROM cpu WHERE cpu = 'cpu-total' GROUP BY time(10s)), (SELECT sum(usage_system) AS sum_idle FROM cpu WHERE cpu = 'cpu-total' GROUP BY time(10s)) GROUP BY time(10s) FILL(none);
+
+-- subquery time range should be restricted
+SELECT COUNT(usage_idle) FROM (SELECT cpu, usage_idle FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time <= '2022-10-31T02:00:30Z') WHERE time >= '2022-10-31T02:00:10Z' AND time <= '2022-10-31T02:00:20Z' GROUP BY time(5s), cpu FILL(0);
+
+SELECT COUNT(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time <= '2022-10-31T02:00:05Z' AND time <= '2022-10-31T02:00:10Z' GROUP BY TIME(10s) FILL(0);
+
+SELECT min, max, max - min FROM (SELECT MIN(usage_idle), MAX(usage_system) FROM cpu GROUP BY TIME(1m));
+SELECT min, max, max - min FROM (SELECT MIN(usage_idle), MAX(usage_system) FROM cpu GROUP BY TIME(10s));
+
+-- the predicate in the outer-most query is the narrowest, and therefore pushed through all the children
+SELECT * FROM (SELECT * FROM (SELECT FIRST(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:10Z';
\ No newline at end of file
diff --git a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected
index 7b3d0c73b3..2a6914067d 100644
--- a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected
@@ -2980,4 +2980,61 @@ name: cpu
+---------------------+------+--------+
| 2022-10-31T02:00:00 | 2.98 | 2.2 |
| 2022-10-31T02:00:10 | 2.99 | 2.1 |
-+---------------------+------+--------+
\ No newline at end of file
++---------------------+------+--------+
+-- InfluxQL: SELECT COUNT(usage_idle) FROM (SELECT cpu, usage_idle FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time <= '2022-10-31T02:00:30Z') WHERE time >= '2022-10-31T02:00:10Z' AND time <= '2022-10-31T02:00:20Z' GROUP BY time(5s), cpu FILL(0);
+name: cpu
+tags: cpu=cpu-total
++---------------------+-------+
+| time | count |
++---------------------+-------+
+| 2022-10-31T02:00:10 | 1 |
+| 2022-10-31T02:00:15 | 0 |
+| 2022-10-31T02:00:20 | 0 |
++---------------------+-------+
+name: cpu
+tags: cpu=cpu0
++---------------------+-------+
+| time | count |
++---------------------+-------+
+| 2022-10-31T02:00:10 | 1 |
+| 2022-10-31T02:00:15 | 0 |
+| 2022-10-31T02:00:20 | 0 |
++---------------------+-------+
+name: cpu
+tags: cpu=cpu1
++---------------------+-------+
+| time | count |
++---------------------+-------+
+| 2022-10-31T02:00:10 | 1 |
+| 2022-10-31T02:00:15 | 0 |
+| 2022-10-31T02:00:20 | 0 |
++---------------------+-------+
+-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time <= '2022-10-31T02:00:05Z' AND time <= '2022-10-31T02:00:10Z' GROUP BY TIME(10s) FILL(0);
+name: cpu
++---------------------+-------+
+| time | count |
++---------------------+-------+
+| 2022-10-31T02:00:00 | 3 |
++---------------------+-------+
+-- InfluxQL: SELECT min, max, max - min FROM (SELECT MIN(usage_idle), MAX(usage_system) FROM cpu GROUP BY TIME(1m));
+name: cpu
++---------------------+------+-----+--------------------+
+| time | min | max | max_min |
++---------------------+------+-----+--------------------+
+| 2022-10-31T02:00:00 | 0.98 | 2.2 | 1.2200000000000002 |
++---------------------+------+-----+--------------------+
+-- InfluxQL: SELECT min, max, max - min FROM (SELECT MIN(usage_idle), MAX(usage_system) FROM cpu GROUP BY TIME(10s));
+name: cpu
++---------------------+------+-----+--------------------+
+| time | min | max | max_min |
++---------------------+------+-----+--------------------+
+| 2022-10-31T02:00:00 | 0.98 | 2.2 | 1.2200000000000002 |
+| 2022-10-31T02:00:10 | 0.99 | 2.1 | 1.11 |
++---------------------+------+-----+--------------------+
+-- InfluxQL: SELECT * FROM (SELECT * FROM (SELECT FIRST(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:10Z';
+name: cpu
++---------------------+-------+
+| time | first |
++---------------------+-------+
+| 2022-10-31T02:00:10 | 2.99 |
++---------------------+-------+
\ No newline at end of file
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index 55ac7f7c47..fc75064a3c 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -177,7 +177,10 @@ impl<'a> Context<'a> {
projection_type: select.projection_type,
tz: select.timezone,
condition: select.condition.as_ref(),
- time_range: select.time_range,
+ // Subqueries should be restricted by the time range of the parent
+ //
+ // See: https://github.com/influxdata/influxdb/blob/f365bb7e3a9c5e227dbf66d84adf674d3d127176/query/iterator.go#L716-L721
+ time_range: select.time_range.intersected(self.time_range),
group_by: select.group_by.as_ref(),
fill: select.fill,
root_group_by_tags: self.root_group_by_tags,
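
The behavioural change is the `intersected` call: a subquery's time range is narrowed to the tightest bounds of itself and its parent. A minimal standalone sketch of such an intersection follows (not part of the commit; this `TimeRange` is a hypothetical stand-in for the IOx type):

#[derive(Debug, Clone, Copy, PartialEq)]
struct TimeRange {
    // Inclusive nanosecond bounds; None means unbounded on that side.
    lower: Option<i64>,
    upper: Option<i64>,
}

impl TimeRange {
    fn intersected(self, other: Self) -> Self {
        Self {
            // Lower bound: the larger of the two lower bounds, if any.
            lower: match (self.lower, other.lower) {
                (Some(a), Some(b)) => Some(a.max(b)),
                (a, b) => a.or(b),
            },
            // Upper bound: the smaller of the two upper bounds, if any.
            upper: match (self.upper, other.upper) {
                (Some(a), Some(b)) => Some(a.min(b)),
                (a, b) => a.or(b),
            },
        }
    }
}

fn main() {
    // Subquery spans 0..=30, outer query spans 10..=20 (offsets in seconds).
    let subquery = TimeRange { lower: Some(0), upper: Some(30) };
    let outer = TimeRange { lower: Some(10), upper: Some(20) };
    // The subquery only sees rows inside the narrower outer range.
    assert_eq!(
        subquery.intersected(outer),
        TimeRange { lower: Some(10), upper: Some(20) }
    );
}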
|
cb7a26cb65e426642a586207633c48e30add570c
|
Fraser Savage
|
2023-09-20 15:02:39
|
Revert `IngestState` methods to crate public
|
These methods should not be used at all outside the ingester, and only
the type itself needs to be accessible in the WAL replay benchmark.
| null |
refactor(ingester): Revert `IngestState` methods to crate public
These methods should not be used at all outside the ingester, and only
the type itself needs to be accessible in the WAL replay benchmark.
|
diff --git a/ingester/src/ingest_state.rs b/ingester/src/ingest_state.rs
index 4a6076040e..64d988b0cb 100644
--- a/ingester/src/ingest_state.rs
+++ b/ingester/src/ingest_state.rs
@@ -69,7 +69,7 @@ impl IngestState {
///
/// Returns true if this call set the error state to `error`, false if
/// `error` was already set.
- pub fn set(&self, error: IngestStateError) -> bool {
+ pub(crate) fn set(&self, error: IngestStateError) -> bool {
let set = error.as_bits();
let mut current = self.state.load(Ordering::Relaxed);
loop {
@@ -105,7 +105,7 @@ impl IngestState {
///
/// Returns true if this call unset the `error` state, false if `error` was
/// already unset.
- pub fn unset(&self, error: IngestStateError) -> bool {
+ pub(crate) fn unset(&self, error: IngestStateError) -> bool {
let unset = error.as_bits();
let mut current = self.state.load(Ordering::Relaxed);
loop {
@@ -144,7 +144,7 @@ impl IngestState {
/// 2. [`IngestStateError::DiskFull`]
/// 3. [`IngestStateError::PersistSaturated`].
///
- pub fn read(&self) -> Result<(), IngestStateError> {
+ pub(crate) fn read(&self) -> Result<(), IngestStateError> {
let current = self.state.load(Ordering::Relaxed);
if current != 0 {
@@ -163,7 +163,7 @@ impl IngestState {
///
/// If more than one error state is set, this follows the same precedence
/// rules as [`IngestState::read()`].
- pub fn read_with_exceptions<const N: usize>(
+ pub(crate) fn read_with_exceptions<const N: usize>(
&self,
exceptions: [IngestStateError; N],
) -> Result<(), IngestStateError> {
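
The surrounding context lines show the pattern these methods implement: error states packed into a single atomic bit-set, with reads resolving multiple set bits by a fixed precedence. A small standalone sketch of that idea (not part of the commit; names are illustrative — `ShuttingDown` stands in for the highest-precedence state — and the compare-and-swap loops are simplified to `fetch_or`):

use std::sync::atomic::{AtomicUsize, Ordering};

#[derive(Debug, Clone, Copy, PartialEq)]
enum StateError { ShuttingDown, DiskFull, PersistSaturated }

impl StateError {
    fn as_bits(self) -> usize {
        match self {
            Self::ShuttingDown => 1 << 0,
            Self::DiskFull => 1 << 1,
            Self::PersistSaturated => 1 << 2,
        }
    }
}

#[derive(Debug, Default)]
struct State(AtomicUsize);

impl State {
    fn set(&self, e: StateError) {
        self.0.fetch_or(e.as_bits(), Ordering::Relaxed);
    }
    fn read(&self) -> Result<(), StateError> {
        let bits = self.0.load(Ordering::Relaxed);
        // Precedence order when several errors are set at once.
        for e in [StateError::ShuttingDown, StateError::DiskFull, StateError::PersistSaturated] {
            if bits & e.as_bits() != 0 {
                return Err(e);
            }
        }
        Ok(())
    }
}

fn main() {
    let s = State::default();
    s.set(StateError::PersistSaturated);
    s.set(StateError::DiskFull);
    // Both errors are set; DiskFull wins by precedence.
    assert_eq!(s.read(), Err(StateError::DiskFull));
}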
|
e33c17c6f73d459e5293619cae82c7205984d141
|
Dom Dwyer
|
2023-05-22 14:07:05
|
add missing lints to ioxd_ingester
|
Adds the standard lints to ioxd_ingester and fixes any lint failures.
Note this doesn't include the normal "document things" lint, because
there's a load of missing docs
| null |
refactor(lints): add missing lints to ioxd_ingester
Adds the standard lints to ioxd_ingester and fixes any lint failures.
Note this doesn't include the normal "document things" lint, because
there's a load of missing docs
|
diff --git a/ioxd_ingester/src/lib.rs b/ioxd_ingester/src/lib.rs
index c0d7f3d7b2..28d8ba6cf0 100644
--- a/ioxd_ingester/src/lib.rs
+++ b/ioxd_ingester/src/lib.rs
@@ -1,3 +1,15 @@
+#![deny(rustdoc::broken_intra_doc_links, rust_2018_idioms)]
+#![warn(
+ clippy::clone_on_ref_ptr,
+ clippy::dbg_macro,
+ clippy::explicit_iter_loop,
+ // See https://github.com/influxdata/influxdb_iox/pull/1671
+ clippy::future_not_send,
+ clippy::todo,
+ clippy::use_self,
+ missing_debug_implementations,
+)]
+
use arrow_flight::flight_service_server::FlightServiceServer;
use async_trait::async_trait;
use clap_blocks::ingester::IngesterConfig;
@@ -162,7 +174,7 @@ pub enum IoxHttpError {
impl IoxHttpError {
fn status_code(&self) -> HttpApiErrorCode {
match self {
- IoxHttpError::NotFound => HttpApiErrorCode::NotFound,
+ Self::NotFound => HttpApiErrorCode::NotFound,
}
}
}
|
7100be410374fc433871d2b1d9201ec644977331
|
Carol (Nichols || Goulding)
|
2023-03-22 14:18:13
|
Inline run_split_plans in anticipation of combining with run_compaction_plan
|
Also set a capacity on the Vec to avoid some reallocations.
| null |
refactor: Inline run_split_plans in anticipation of combining with run_compaction_plan
Also set a capacity on the Vec to avoid some reallocations.
|
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index f36dac2e77..f0cb66c315 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -320,14 +320,21 @@ async fn run_plans(
.await
}
FilesToSplitOrCompact::Split(files) => {
- run_split_plans(
- files,
- partition_info,
- components,
- job_semaphore,
- scratchpad_ctx,
- )
- .await
+ let mut created_file_params =
+ Vec::with_capacity(files.iter().map(|f| f.split_times.len() + 1).sum());
+ for file in files {
+ created_file_params.extend(
+ run_split_plan(
+ file,
+ partition_info,
+ components,
+ Arc::clone(&job_semaphore),
+ scratchpad_ctx,
+ )
+ .await?,
+ );
+ }
+ Ok(created_file_params)
}
FilesToSplitOrCompact::None => Ok(vec![]), // Nothing to do
}
@@ -373,34 +380,6 @@ async fn run_compaction_plan(
.await
}
-/// Split each of given files into multiple files
-async fn run_split_plans(
- files_to_split: &[FileToSplit],
- partition_info: &Arc<PartitionInfo>,
- components: &Arc<Components>,
- job_semaphore: Arc<InstrumentedAsyncSemaphore>,
- scratchpad_ctx: &mut dyn Scratchpad,
-) -> Result<Vec<ParquetFileParams>, DynError> {
- if files_to_split.is_empty() {
- return Ok(vec![]);
- }
-
- let mut created_file_params = vec![];
- for file_to_split in files_to_split {
- let x = run_split_plan(
- file_to_split,
- partition_info,
- components,
- Arc::clone(&job_semaphore),
- scratchpad_ctx,
- )
- .await?;
- created_file_params.extend(x);
- }
-
- Ok(created_file_params)
-}
-
// Split a given file into multiple files
async fn run_split_plan(
file_to_split: &FileToSplit,
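
The capacity mentioned in the body is knowable up front because each split plan yields one output file per split time plus a final remainder. A tiny standalone sketch of that arithmetic (not part of the commit; the trimmed `FileToSplit` and `expected_outputs` are hypothetical):

// Minimal sketch: only the field relevant to the capacity calculation is kept.
struct FileToSplit {
    split_times: Vec<i64>,
}

// Each file splits into one output per split time plus a final remainder,
// so the total output count is known before the loop runs.
fn expected_outputs(files: &[FileToSplit]) -> usize {
    files.iter().map(|f| f.split_times.len() + 1).sum()
}

fn main() {
    let files = vec![
        FileToSplit { split_times: vec![10, 20] }, // 3 output files
        FileToSplit { split_times: vec![15] },     // 2 output files
    ];
    assert_eq!(expected_outputs(&files), 5);
    let created: Vec<&str> = Vec::with_capacity(expected_outputs(&files));
    assert!(created.capacity() >= 5);
}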
|
a1211b0d03d3c917f2c8e0ed2a1e60762f0e9043
|
Dom Dwyer
|
2023-08-22 18:16:07
|
define parquet file gossip type
|
Specify the gossip message used to notify peers of new parquet files.
This reuses the existing ParquetFile type in the "catalog" proto
package.
This will probably expand in the future to differentiate between new
files (via ingest) and compacted files (which make other files
obsolete).
| null |
feat(proto): define parquet file gossip type
Specify the gossip message used to notify peers of new parquet files.
This reuses the existing ParquetFile type in the "catalog" proto
package.
This will probably expand in the future to differentiate between new
files (via ingest) and compacted files (which make other files
obsolete).
|
diff --git a/generated_types/build.rs b/generated_types/build.rs
index 6465a8dcdf..c82c35fae8 100644
--- a/generated_types/build.rs
+++ b/generated_types/build.rs
@@ -57,6 +57,7 @@ fn generate_grpc_types(root: &Path) -> Result<()> {
catalog_path.join("service.proto"),
compactor_path.join("service.proto"),
delete_path.join("service.proto"),
+ gossip_path.join("parquet_file.proto"),
gossip_path.join("schema.proto"),
ingester_path.join("parquet_metadata.proto"),
ingester_path.join("persist.proto"),
diff --git a/generated_types/protos/influxdata/iox/gossip/v1/parquet_file.proto b/generated_types/protos/influxdata/iox/gossip/v1/parquet_file.proto
new file mode 100644
index 0000000000..b45825aa85
--- /dev/null
+++ b/generated_types/protos/influxdata/iox/gossip/v1/parquet_file.proto
@@ -0,0 +1,8 @@
+syntax = "proto3";
+package influxdata.iox.gossip.v1;
+option go_package = "github.com/influxdata/iox/gossip/v1";
+
+import "influxdata/iox/catalog/v1/parquet_file.proto";
+
+/// A gossip-specific wrapper over a `ParquetFile` record.
+message NewParquetFile { influxdata.iox.catalog.v1.ParquetFile file = 1; }
|
a0f1184bb3eeb8a6f8fa1bbdb9898d41355ff67f
|
Jeffrey Smith II
|
2022-10-13 14:57:57
|
manually scheduled task runs now run when expected (#23664)
|
* fix: run manually scheduled tasks at their scheduled time
* fix: actually use it
* fix: get tests building
* fix: fix tests
* fix: lint
| null |
fix: manually scheduled task runs now run when expected (#23664)
* fix: run manually scheduled tasks at their scheduled time
* fix: actually use it
* fix: get tests building
* fix: fix tests
* fix: lint
|
diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go
index 881956aad4..e03e514144 100644
--- a/cmd/influxd/launcher/launcher.go
+++ b/cmd/influxd/launcher/launcher.go
@@ -461,6 +461,10 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
combinedTaskService,
executor.WithFlagger(m.flagger),
)
+ err = executor.LoadExistingScheduleRuns(ctx)
+ if err != nil {
+ m.log.Fatal("could not load existing scheduled runs", zap.Error(err))
+ }
m.executor = executor
m.reg.MustRegister(executorMetrics.PrometheusCollectors()...)
schLogger := m.log.With(zap.String("service", "task-scheduler"))
diff --git a/kv/task.go b/kv/task.go
index 90e0eb6298..924ed705ef 100644
--- a/kv/task.go
+++ b/kv/task.go
@@ -1012,6 +1012,15 @@ func (s *Service) findRunByID(ctx context.Context, tx Tx, taskID, runID platform
runBytes, err := bucket.Get(key)
if err != nil {
if IsNotFound(err) {
+ runs, err := s.manualRuns(ctx, tx, taskID)
+ for _, run := range runs {
+ if run.ID == runID {
+ return run, nil
+ }
+ }
+ if err != nil {
+ return nil, taskmodel.ErrRunNotFound
+ }
return nil, taskmodel.ErrRunNotFound
}
return nil, taskmodel.ErrUnexpectedTaskBucketErr(err)
diff --git a/task/backend/coordinator/coordinator.go b/task/backend/coordinator/coordinator.go
index ee5fe6a2b5..47476fc9eb 100644
--- a/task/backend/coordinator/coordinator.go
+++ b/task/backend/coordinator/coordinator.go
@@ -22,6 +22,7 @@ const DefaultLimit = 1000
// Executor is an abstraction of the task executor with only the functions needed by the coordinator
type Executor interface {
ManualRun(ctx context.Context, id platform.ID, runID platform.ID) (executor.Promise, error)
+ ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error
Cancel(ctx context.Context, runID platform.ID) error
}
@@ -149,7 +150,7 @@ func (c *Coordinator) TaskUpdated(ctx context.Context, from, to *taskmodel.Task)
return nil
}
-//TaskDeleted asks the Scheduler to release the deleted task
+// TaskDeleted asks the Scheduler to release the deleted task
func (c *Coordinator) TaskDeleted(ctx context.Context, id platform.ID) error {
tid := scheduler.ID(id)
if err := c.sch.Release(tid); err != nil && err != taskmodel.ErrTaskNotClaimed {
@@ -166,14 +167,19 @@ func (c *Coordinator) RunCancelled(ctx context.Context, runID platform.ID) error
return err
}
-// RunForced speaks directly to the Executor to run a task immediately
+// RunForced speaks directly to the Executor to run a task immediately, or schedule the run if `scheduledFor` is set.
func (c *Coordinator) RunForced(ctx context.Context, task *taskmodel.Task, run *taskmodel.Run) error {
- // the returned promise is not used, since clients expect the HTTP server to return immediately after scheduling the
- // task rather than waiting for the task to finish
- _, err := c.ex.ManualRun(ctx, task.ID, run.ID)
+ var err error
+ if !run.ScheduledFor.IsZero() {
+ err = c.ex.ScheduleManualRun(ctx, task.ID, run.ID)
+ } else {
+ // the returned promise is not used, since clients expect the HTTP server to return immediately after scheduling the
+ // task rather than waiting for the task to finish
+ _, err = c.ex.ManualRun(ctx, task.ID, run.ID)
+ }
+
if err != nil {
return taskmodel.ErrRunExecutionError(err)
}
-
return nil
}
diff --git a/task/backend/coordinator/coordinator_test.go b/task/backend/coordinator/coordinator_test.go
index 712ebac675..ccc9f068a2 100644
--- a/task/backend/coordinator/coordinator_test.go
+++ b/task/backend/coordinator/coordinator_test.go
@@ -20,12 +20,13 @@ func Test_Coordinator_Executor_Methods(t *testing.T) {
taskOne = &taskmodel.Task{ID: one}
runOne = &taskmodel.Run{
- ID: one,
- TaskID: one,
- ScheduledFor: time.Now(),
+ ID: one,
+ TaskID: one,
}
allowUnexported = cmp.AllowUnexported(executorE{}, schedulerC{}, SchedulableTask{})
+
+ scheduledTime = time.Now()
)
for _, test := range []struct {
@@ -45,7 +46,22 @@ func Test_Coordinator_Executor_Methods(t *testing.T) {
},
executor: &executorE{
calls: []interface{}{
- manualRunCall{taskOne.ID, runOne.ID},
+ manualRunCall{taskOne.ID, runOne.ID, false},
+ },
+ },
+ },
+ {
+ name: "RunForcedScheduled",
+ call: func(t *testing.T, c *Coordinator) {
+ rr := runOne
+ rr.ScheduledFor = scheduledTime
+ if err := c.RunForced(context.Background(), taskOne, runOne); err != nil {
+ t.Errorf("expected nil error found %q", err)
+ }
+ },
+ executor: &executorE{
+ calls: []interface{}{
+ manualRunCall{taskOne.ID, runOne.ID, true},
},
},
},
diff --git a/task/backend/coordinator/support_test.go b/task/backend/coordinator/support_test.go
index 4a05832e06..6ee351a5ee 100644
--- a/task/backend/coordinator/support_test.go
+++ b/task/backend/coordinator/support_test.go
@@ -17,8 +17,9 @@ type (
}
manualRunCall struct {
- TaskID platform.ID
- RunID platform.ID
+ TaskID platform.ID
+ RunID platform.ID
+ WasScheduled bool
}
cancelCallC struct {
@@ -96,7 +97,7 @@ func (s *schedulerC) Release(taskID scheduler.ID) error {
}
func (e *executorE) ManualRun(ctx context.Context, id platform.ID, runID platform.ID) (executor.Promise, error) {
- e.calls = append(e.calls, manualRunCall{id, runID})
+ e.calls = append(e.calls, manualRunCall{id, runID, false})
ctx, cancel := context.WithCancel(ctx)
p := promise{
done: make(chan struct{}),
@@ -109,6 +110,11 @@ func (e *executorE) ManualRun(ctx context.Context, id platform.ID, runID platfor
return &p, err
}
+func (e *executorE) ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error {
+ e.calls = append(e.calls, manualRunCall{id, runID, true})
+ return nil
+}
+
func (e *executorE) Cancel(ctx context.Context, runID platform.ID) error {
e.calls = append(e.calls, cancelCallC{runID})
return nil
diff --git a/task/backend/executor/executor.go b/task/backend/executor/executor.go
index 3d4bef7473..1ebfaa8303 100644
--- a/task/backend/executor/executor.go
+++ b/task/backend/executor/executor.go
@@ -145,6 +145,7 @@ func NewExecutor(log *zap.Logger, qs query.QueryService, us PermissionService, t
ps: us,
currentPromises: sync.Map{},
+ futurePromises: sync.Map{},
promiseQueue: make(chan *promise, maxPromises),
workerLimit: make(chan struct{}, cfg.maxWorkers),
limitFunc: func(*taskmodel.Task, *taskmodel.Run) error { return nil }, // noop
@@ -159,6 +160,8 @@ func NewExecutor(log *zap.Logger, qs query.QueryService, us PermissionService, t
e: e,
}
+ go e.processScheduledTasks()
+
e.workerPool = sync.Pool{New: wm.new}
return e, e.metrics
}
@@ -177,6 +180,9 @@ type Executor struct {
// currentPromises are all the promises we are made that have not been fulfilled
currentPromises sync.Map
+ // futurePromises are promises that are scheduled to be executed in the future
+ futurePromises sync.Map
+
// keep a pool of promise's we have in queue
promiseQueue chan *promise
@@ -191,6 +197,52 @@ type Executor struct {
flagger feature.Flagger
}
+func (e *Executor) LoadExistingScheduleRuns(ctx context.Context) error {
+ tasks, _, err := e.ts.FindTasks(ctx, taskmodel.TaskFilter{})
+ if err != nil {
+ e.log.Error("err finding tasks:", zap.Error(err))
+ return err
+ }
+ for _, t := range tasks {
+ beforeTime := time.Now().Add(time.Hour * 24 * 365).Format(time.RFC3339)
+ runs, _, err := e.ts.FindRuns(ctx, taskmodel.RunFilter{Task: t.ID, BeforeTime: beforeTime})
+ if err != nil {
+ e.log.Error("err finding runs:", zap.Error(err))
+ return err
+ }
+ for _, run := range runs {
+ if run.ScheduledFor.After(time.Now()) {
+ perm, err := e.ps.FindPermissionForUser(ctx, t.OwnerID)
+ if err != nil {
+ e.log.Error("err finding perms:", zap.Error(err))
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ // create promise
+ p := &promise{
+ run: run,
+ task: t,
+ auth: &influxdb.Authorization{
+ Status: influxdb.Active,
+ UserID: t.OwnerID,
+ ID: platform.ID(1),
+ OrgID: t.OrganizationID,
+ Permissions: perm,
+ },
+ createdAt: time.Now().UTC(),
+ done: make(chan struct{}),
+ ctx: ctx,
+ cancelFunc: cancel,
+ }
+ e.futurePromises.Store(run.ID, p)
+ }
+ }
+ }
+
+ return nil
+}
+
// SetLimitFunc sets the limit func for this task executor
func (e *Executor) SetLimitFunc(l LimitFunc) {
e.limitFunc = l
@@ -241,6 +293,58 @@ func (e *Executor) ManualRun(ctx context.Context, id platform.ID, runID platform
return p, err
}
+func (e *Executor) ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error {
+ // create promises for any manual runs
+ r, err := e.tcs.StartManualRun(ctx, id, runID)
+ if err != nil {
+ return err
+ }
+
+ auth, err := icontext.GetAuthorizer(ctx)
+ if err != nil {
+ return err
+ }
+
+ // create a new context for running the task in the background so that returning the HTTP response does not cancel the
+ // context of the task to be run
+ ctx = icontext.SetAuthorizer(context.Background(), auth)
+
+ span, ctx := tracing.StartSpanFromContext(ctx)
+ defer span.Finish()
+
+ t, err := e.ts.FindTaskByID(ctx, r.TaskID)
+ if err != nil {
+ return err
+ }
+
+ perm, err := e.ps.FindPermissionForUser(ctx, t.OwnerID)
+ if err != nil {
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ // create promise
+ p := &promise{
+ run: r,
+ task: t,
+ auth: &influxdb.Authorization{
+ Status: influxdb.Active,
+ UserID: t.OwnerID,
+ ID: platform.ID(1),
+ OrgID: t.OrganizationID,
+ Permissions: perm,
+ },
+ createdAt: time.Now().UTC(),
+ done: make(chan struct{}),
+ ctx: ctx,
+ cancelFunc: cancel,
+ }
+ e.metrics.manualRunsCounter.WithLabelValues(id.String()).Inc()
+
+ e.futurePromises.Store(runID, p)
+ return nil
+}
+
func (e *Executor) ResumeCurrentRun(ctx context.Context, id platform.ID, runID platform.ID) (Promise, error) {
cr, err := e.tcs.CurrentlyRunning(ctx, id)
if err != nil {
@@ -363,6 +467,23 @@ func (e *Executor) createPromise(ctx context.Context, run *taskmodel.Run) (*prom
return p, nil
}
+func (e *Executor) processScheduledTasks() {
+ t := time.Tick(1 * time.Second)
+ for range t {
+ e.futurePromises.Range(func(k any, v any) bool {
+ vv := v.(*promise)
+ if vv.run.ScheduledFor.Equal(time.Now()) || vv.run.ScheduledFor.Before(time.Now()) {
+ if vv.run.RunAt.IsZero() {
+ e.promiseQueue <- vv
+ e.futurePromises.Delete(k)
+ e.startWorker()
+ }
+ }
+ return true
+ })
+ }
+}
+
type workerMaker struct {
e *Executor
}
@@ -445,9 +566,18 @@ func (w *worker) start(p *promise) {
defer span.Finish()
// add to run log
- w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), fmt.Sprintf("Started task from script: %q", p.task.Flux))
+ if err := w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), fmt.Sprintf("Started task from script: %q", p.task.Flux)); err != nil {
+ tid := zap.String("taskID", p.task.ID.String())
+ rid := zap.String("runID", p.run.ID.String())
+ w.e.log.With(zap.Error(err)).With(tid).With(rid).Warn("error adding run log: ")
+ }
+
// update run status
- w.e.tcs.UpdateRunState(ctx, p.task.ID, p.run.ID, time.Now().UTC(), taskmodel.RunStarted)
+ if err := w.e.tcs.UpdateRunState(ctx, p.task.ID, p.run.ID, time.Now().UTC(), taskmodel.RunStarted); err != nil {
+ tid := zap.String("taskID", p.task.ID.String())
+ rid := zap.String("runID", p.run.ID.String())
+ w.e.log.With(zap.Error(err)).With(tid).With(rid).Warn("error updating run state: ")
+ }
// add to metrics
w.e.metrics.StartRun(p.task, time.Since(p.createdAt), time.Since(p.run.RunAt))
diff --git a/task/mock/executor.go b/task/mock/executor.go
index fd02135c43..f1eb25a9c0 100644
--- a/task/mock/executor.go
+++ b/task/mock/executor.go
@@ -108,6 +108,10 @@ func (e *Executor) ManualRun(ctx context.Context, id platform.ID, runID platform
return p, err
}
+func (e *Executor) ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error {
+ return nil
+}
+
func (e *Executor) Wait() {
e.wg.Wait()
}
|
666cabb1f41e0d2019de655e8792daefb7669c80
|
Ole Kristian (Zee)
|
2022-11-16 22:18:43
|
fix wrong max age transformation from seconds (#23684)
|
* fix: fix wrong max age transformation from seconds
* refactor: clarify max age intent
* refactor: remove unnecessary duration
| null |
fix: fix wrong max age transformation from seconds (#23684)
* fix: fix wrong max age transformation from seconds
* refactor: clarify max age intent
* refactor: remove unnecessary duration
|
diff --git a/replications/internal/queue_management.go b/replications/internal/queue_management.go
index 96589e1b49..c0e40ee778 100644
--- a/replications/internal/queue_management.go
+++ b/replications/internal/queue_management.go
@@ -21,7 +21,7 @@ import (
const (
scannerAdvanceInterval = 10 * time.Second
purgeInterval = 60 * time.Second
- defaultMaxAge = 168 * time.Hour / time.Second
+ defaultMaxAge = 7 * 24 * time.Hour // 1 week
)
type remoteWriter interface {
@@ -72,7 +72,7 @@ func NewDurableQueueManager(log *zap.Logger, queuePath string, metrics *metrics.
}
// InitializeQueue creates and opens a new durable queue which is associated with a replication stream.
-func (qm *durableQueueManager) InitializeQueue(replicationID platform.ID, maxQueueSizeBytes int64, orgID platform.ID, localBucketID platform.ID, maxAge int64) error {
+func (qm *durableQueueManager) InitializeQueue(replicationID platform.ID, maxQueueSizeBytes int64, orgID platform.ID, localBucketID platform.ID, maxAgeSeconds int64) error {
qm.mutex.Lock()
defer qm.mutex.Unlock()
@@ -112,7 +112,7 @@ func (qm *durableQueueManager) InitializeQueue(replicationID platform.ID, maxQue
}
// Map new durable queue and scanner to its corresponding replication stream via replication ID
- rq := qm.newReplicationQueue(replicationID, orgID, localBucketID, newQueue, maxAge)
+ rq := qm.newReplicationQueue(replicationID, orgID, localBucketID, newQueue, maxAgeSeconds)
qm.replicationQueues[replicationID] = rq
rq.Open()
@@ -439,15 +439,15 @@ func (qm *durableQueueManager) EnqueueData(replicationID platform.ID, data []byt
return nil
}
-func (qm *durableQueueManager) newReplicationQueue(id platform.ID, orgID platform.ID, localBucketID platform.ID, queue *durablequeue.Queue, maxAge int64) *replicationQueue {
+func (qm *durableQueueManager) newReplicationQueue(id platform.ID, orgID platform.ID, localBucketID platform.ID, queue *durablequeue.Queue, maxAgeSeconds int64) *replicationQueue {
logger := qm.logger.With(zap.String("replication_id", id.String()))
done := make(chan struct{})
// check for max age minimum
var maxAgeTime time.Duration
- if maxAge < 0 {
+ if maxAgeSeconds < 0 {
maxAgeTime = defaultMaxAge
} else {
- maxAgeTime = time.Duration(maxAge)
+ maxAgeTime = time.Duration(maxAgeSeconds) * time.Second
}
return &replicationQueue{
|
6de18b6544978c287ac76156b40bc9a805df0b56
|
Joe-Blount
|
2023-05-03 15:09:00
|
conditionally parse shard_id from HOSTNAME (#7733)
|
* chore: conditionally parse shard_id from HOSTNAME
* chore: remove HOSTNAME env from test case relying on it not being there.
| null |
chore: conditionally parse shard_id from HOSTNAME (#7733)
* chore: conditionally parse shard_id from HOSTNAME
* chore: remove HOSTNAME env from test case relying on it not being there.
|
diff --git a/clap_blocks/src/compactor2.rs b/clap_blocks/src/compactor2.rs
index 64584a802f..6b6cf5d64f 100644
--- a/clap_blocks/src/compactor2.rs
+++ b/clap_blocks/src/compactor2.rs
@@ -235,6 +235,7 @@ pub struct Compactor2Config {
/// Number of shards.
///
/// If this is set then the shard ID MUST also be set. If both are not provided, sharding is disabled.
+ /// (shard ID can be provided by the host name)
#[clap(
long = "compaction-shard-count",
env = "INFLUXDB_IOX_COMPACTION_SHARD_COUNT",
@@ -254,6 +255,13 @@ pub struct Compactor2Config {
)]
pub shard_id: Option<usize>,
+ /// Host Name
+ ///
+ /// comprised of leading text (e.g. 'iox-shared-compactor-'), ending with shard_id (e.g. '0').
+ /// When shard_count is specified, but shard_id is not specified, the id is extracted from hostname.
+ #[clap(long = "hostname", env = "HOSTNAME", action)]
+ pub hostname: Option<String>,
+
/// Minimum number of L1 files to compact to L2.
///
/// If there are more than this many L1 (by definition non
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index 4a6c04f17b..491e1ad7a0 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -6,6 +6,7 @@ use std::{sync::Arc, time::Duration};
use data_types::CompactionLevel;
use object_store::memory::InMemory;
+use observability_deps::tracing::info;
use crate::{
config::{CompactionType, Config, PartitionsSourceConfig},
@@ -156,6 +157,10 @@ fn make_partitions_source_commit_partition_sink(
let mut id_only_partition_filters: Vec<Arc<dyn IdOnlyPartitionFilter>> = vec![];
if let Some(shard_config) = &config.shard_config {
// add shard filter before performing any catalog IO
+ info!(
+ "starting compactor {} of {}",
+ shard_config.shard_id, shard_config.n_shards
+ );
id_only_partition_filters.push(Arc::new(ShardPartitionFilter::new(
shard_config.n_shards,
shard_config.shard_id,
diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs
index ae136ee291..ca519aecf5 100644
--- a/influxdb_iox/src/commands/run/all_in_one.rs
+++ b/influxdb_iox/src/commands/run/all_in_one.rs
@@ -505,6 +505,7 @@ impl Config {
ignore_partition_skip_marker: false,
shard_count: None,
shard_id: None,
+ hostname: None,
min_num_l1_files_to_compact: 1,
process_once: false,
process_all_partitions: false,
diff --git a/influxdb_iox/tests/end_to_end_cases/compactor.rs b/influxdb_iox/tests/end_to_end_cases/compactor.rs
index d9490a11d4..7744c011ad 100644
--- a/influxdb_iox/tests/end_to_end_cases/compactor.rs
+++ b/influxdb_iox/tests/end_to_end_cases/compactor.rs
@@ -66,6 +66,7 @@ fn num_shards_without_shard_id_is_invalid() {
.arg("compactor2")
.env("INFLUXDB_IOX_COMPACTION_SHARD_COUNT", "1") // only provide shard count
.env("INFLUXDB_IOX_CATALOG_TYPE", "memory")
+ .env_remove("HOSTNAME")
.assert()
.failure()
.stderr(predicate::str::contains(
@@ -73,6 +74,20 @@ fn num_shards_without_shard_id_is_invalid() {
));
}
+#[test]
+fn num_shards_with_hostname_is_valid() {
+ Command::cargo_bin("influxdb_iox")
+ .unwrap()
+ .arg("run")
+ .arg("compactor2")
+ .env("INFLUXDB_IOX_COMPACTION_SHARD_COUNT", "3") // provide shard count
+ .env("HOSTNAME", "iox-shared-compactor-8") // provide shard id via hostname
+ .env("INFLUXDB_IOX_CATALOG_TYPE", "memory")
+ .assert()
+ .failure()
+ .stderr(predicate::str::contains("shard_id out of range"));
+}
+
#[tokio::test]
async fn sharded_compactor_0_always_compacts_partition_1() {
test_helpers::maybe_start_logging();
diff --git a/ioxd_compactor2/src/lib.rs b/ioxd_compactor2/src/lib.rs
index 5f4901cc1d..397e4aa24e 100644
--- a/ioxd_compactor2/src/lib.rs
+++ b/ioxd_compactor2/src/lib.rs
@@ -143,11 +143,31 @@ pub async fn create_compactor2_server_type(
) -> Arc<dyn ServerType> {
let backoff_config = BackoffConfig::default();
+ // if shard_count is specified, shard_id must be provided also.
+ // shard_id may be specified explicitly or extracted from the host name.
+ let mut shard_id = compactor_config.shard_id;
+ if shard_id.is_none()
+ && compactor_config.shard_count.is_some()
+ && compactor_config.hostname.is_some()
+ {
+ let parsed_id = compactor_config
+ .hostname
+ .unwrap()
+ .chars()
+ .skip_while(|ch| !ch.is_ascii_digit())
+ .take_while(|ch| ch.is_ascii_digit())
+ .fold(None, |acc, ch| {
+ ch.to_digit(10).map(|b| acc.unwrap_or(0) * 10 + b)
+ });
+ if parsed_id.is_some() {
+ shard_id = Some(parsed_id.unwrap() as usize);
+ }
+ }
assert!(
- compactor_config.shard_id.is_some() == compactor_config.shard_count.is_some(),
+ shard_id.is_some() == compactor_config.shard_count.is_some(),
"must provide or not provide shard ID and count"
);
- let shard_config = compactor_config.shard_id.map(|shard_id| ShardConfig {
+ let shard_config = shard_id.map(|shard_id| ShardConfig {
shard_id,
n_shards: compactor_config.shard_count.expect("just checked"),
});
|
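A minimal standalone sketch of the hostname-suffix parsing added in the commit above (#7733): it pulls the first run of ASCII digits out of a name such as 'iox-shared-compactor-8'. This is a simplified, collect-and-parse equivalent of the skip_while/take_while/fold in the diff; the function name is invented for illustration.

// Hypothetical helper name; extracts the first run of ASCII digits from a hostname.
fn shard_id_from_hostname(hostname: &str) -> Option<usize> {
    let digits: String = hostname
        .chars()
        .skip_while(|ch| !ch.is_ascii_digit())
        .take_while(|ch| ch.is_ascii_digit())
        .collect();
    digits.parse().ok()
}

fn main() {
    assert_eq!(shard_id_from_hostname("iox-shared-compactor-8"), Some(8));
    assert_eq!(shard_id_from_hostname("no-digits-here"), None);
}

In the real code the parsed value is still validated against the shard count; the new end-to-end test above expects a "shard_id out of range" error for a hostname ending in 8 with a shard count of 3.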
411c9f6c1c5ee5c3112d22756e5e7a02b7ea9ceb
|
Dom Dwyer
|
2023-04-27 16:28:03
|
remove unused dep
|
Config & logic are now decoupled!
| null |
chore(clap_blocks): remove unused dep
Config & logic are now decoupled!
|
diff --git a/Cargo.lock b/Cargo.lock
index a72c02d07d..28845e1776 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -884,7 +884,6 @@ dependencies = [
name = "clap_blocks"
version = "0.1.0"
dependencies = [
- "authz",
"clap 4.2.5",
"data_types",
"futures",
diff --git a/clap_blocks/Cargo.toml b/clap_blocks/Cargo.toml
index f85903810e..fce39986bb 100644
--- a/clap_blocks/Cargo.toml
+++ b/clap_blocks/Cargo.toml
@@ -6,7 +6,6 @@ edition.workspace = true
license.workspace = true
[dependencies]
-authz = { path = "../authz" }
clap = { version = "4", features = ["derive", "env"] }
data_types = { path = "../data_types" }
futures = "0.3"
|
7e595eca88ef12e0b75dc89c6e154cde2953a662
|
Fraser Savage
|
2023-07-12 16:41:40
|
Assert RPC write span contexts can be parsed as encoded
|
This test aims to add some assertion that the span context is correctly
encoded into an RPC write request as long as the [`TraceHeaderParser`]
is responsible for decorating the request's extensions with the added
information.
| null |
test(router): Assert RPC write span contexts can be parsed as encoded
This test aims to add some assertion that the span context is correctly
encoded into an RPC write request as long as the [`TraceHeaderParser`]
is responsible for decorating the request's extensions with the added
information.
|
diff --git a/router/src/dml_handlers/rpc_write/client.rs b/router/src/dml_handlers/rpc_write/client.rs
index fea19d7f2a..068ef72540 100644
--- a/router/src/dml_handlers/rpc_write/client.rs
+++ b/router/src/dml_handlers/rpc_write/client.rs
@@ -7,7 +7,6 @@ use generated_types::influxdata::iox::ingester::v1::{
write_service_client::WriteServiceClient, WriteRequest,
};
use thiserror::Error;
-use tonic::IntoRequest;
use trace::ctx::SpanContext;
use trace_http::ctx::format_jaeger_trace_context;
@@ -82,22 +81,73 @@ impl<'a> WriteClient for TracePropagatingWriteClient<'a> {
op: WriteRequest,
span_ctx: Option<SpanContext>,
) -> Result<(), RpcWriteClientError> {
- let mut req = tonic::Request::new(op).into_request();
-
- if let Some(span_ctx) = span_ctx {
- req.metadata_mut().insert(
- tonic::metadata::MetadataKey::from_bytes(
- self.trace_context_header_name.as_bytes(),
- )?,
- tonic::metadata::MetadataValue::try_from(&format_jaeger_trace_context(&span_ctx))?,
- );
- };
-
+ let req = decorate_request_with_span_context(
+ tonic::Request::new(op),
+ self.trace_context_header_name,
+ span_ctx,
+ )?;
WriteServiceClient::write(&mut self.inner.clone(), req).await?;
Ok(())
}
}
+fn decorate_request_with_span_context<T>(
+ mut req: tonic::Request<T>,
+ trace_context_header_name: &str,
+ span_ctx: Option<SpanContext>,
+) -> Result<tonic::Request<T>, RpcWriteClientError> {
+ if let Some(span_ctx) = span_ctx {
+ req.metadata_mut().insert(
+ tonic::metadata::MetadataKey::from_bytes(trace_context_header_name.as_bytes())?,
+ tonic::metadata::MetadataValue::try_from(&format_jaeger_trace_context(&span_ctx))?,
+ );
+ };
+ Ok(req)
+}
+
+#[cfg(test)]
+mod test {
+ use assert_matches::assert_matches;
+ use trace::{RingBufferTraceCollector, TraceCollector};
+ use trace_http::ctx::TraceHeaderParser;
+
+ use super::*;
+
+ const ARBITRARY_TRACE_CONTEXT_HEADER_NAME: &str = "bananas";
+
+ #[test]
+ fn span_context_can_be_parsed_from_write_request() {
+ // Initialise a trace context to bundle into the request.
+ let trace_collector = Arc::new(RingBufferTraceCollector::new(5));
+ let trace_observer: Arc<dyn TraceCollector> = Arc::new(Arc::clone(&trace_collector));
+ let req_ctx = SpanContext::new(Arc::clone(&trace_observer));
+ let req_span = req_ctx.child("request span");
+
+ // Decorate the request with the context.
+ let req = decorate_request_with_span_context(
+ tonic::Request::new(WriteRequest::default()),
+ ARBITRARY_TRACE_CONTEXT_HEADER_NAME,
+ Some(req_span.ctx),
+ )
+ .expect("must be able to decorate request");
+
+ // Parse the headers as it would be done by the middleware layer.
+ let header_parser = TraceHeaderParser::new()
+ .with_jaeger_trace_context_header_name(ARBITRARY_TRACE_CONTEXT_HEADER_NAME);
+ let headers = req.metadata().clone().into_headers();
+ let got_ctx = header_parser
+ .parse(Some(trace_observer).as_ref(), &headers)
+ .expect("must be able to parse a span context");
+
+ // Ensure that the parsed context shares the trace ID and has the
+ // request span as its parent.
+ assert_matches!(got_ctx, Some(got_ctx) => {
+ assert_eq!(got_ctx.trace_id, req_ctx.trace_id);
+ assert_eq!(got_ctx.parent_span_id, Some(req_ctx.span_id));
+ });
+ }
+}
+
/// Mocks for testing
pub mod mock {
use super::*;
|
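A self-contained sketch of the round trip the test above exercises, reduced to plain tonic metadata calls: write a trace header onto a request, then read the same header back as a server-side parser would. Only the tonic crate is assumed; the header name and encoded value are made-up placeholders rather than the real Jaeger wire format.

use tonic::metadata::{MetadataKey, MetadataValue};

fn main() {
    const HEADER: &str = "uber-trace-id";  // illustrative header name
    let encoded = "deadbeef:cafe:0:1";     // stand-in for a serialised span context

    // Encode: attach the trace context as request metadata, as the client does.
    let mut req = tonic::Request::new(());
    req.metadata_mut().insert(
        MetadataKey::from_bytes(HEADER.as_bytes()).expect("valid header name"),
        MetadataValue::try_from(encoded).expect("valid header value"),
    );

    // Decode: read the same header back, as a middleware-style parser would.
    let got = req
        .metadata()
        .get(HEADER)
        .and_then(|v| v.to_str().ok())
        .expect("header should round-trip");
    assert_eq!(got, encoded);
}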
ec5b35e3cc786646a97ea4381886aab8cc98398a
|
Dom Dwyer
|
2023-02-01 11:29:56
|
reject negative retention periods
|
Do not allow a negative retention period to be specified.
| null |
fix: reject negative retention periods
Do not allow a negative retention period to be specified.
|
diff --git a/service_grpc_namespace/src/lib.rs b/service_grpc_namespace/src/lib.rs
index bd44b69d22..6ec32a9d49 100644
--- a/service_grpc_namespace/src/lib.rs
+++ b/service_grpc_namespace/src/lib.rs
@@ -64,7 +64,7 @@ impl namespace_service_server::NamespaceService for NamespaceService {
retention_period_ns,
} = request.into_inner();
- let retention_period_ns = map_retention_period(retention_period_ns);
+ let retention_period_ns = map_retention_period(retention_period_ns)?;
debug!(%namespace_name, ?retention_period_ns, "Creating namespace");
@@ -112,7 +112,7 @@ impl namespace_service_server::NamespaceService for NamespaceService {
retention_period_ns,
} = request.into_inner();
- let retention_period_ns = map_retention_period(retention_period_ns);
+ let retention_period_ns = map_retention_period(retention_period_ns)?;
debug!(%namespace_name, ?retention_period_ns, "Updating namespace retention");
@@ -162,27 +162,34 @@ fn create_namespace_to_proto(namespace: CatalogNamespace) -> CreateNamespaceResp
/// 0 is always mapped to [`None`], indicating infinite retention.
///
/// Negative retention periods are rejected with an error.
-fn map_retention_period(v: Option<i64>) -> Option<i64> {
+fn map_retention_period(v: Option<i64>) -> Result<Option<i64>, Status> {
match v {
- Some(0) => None,
- Some(v) => Some(v),
- None => None,
+ Some(0) => Ok(None),
+ Some(v @ 1..) => Ok(Some(v)),
+ Some(_v @ ..=0) => Err(Status::invalid_argument(
+ "invalid negative retention period",
+ )),
+ None => Ok(None),
}
}
#[cfg(test)]
mod tests {
- use super::*;
+ use tonic::Code;
- use assert_matches::assert_matches;
+ use super::*;
#[test]
fn test_retention_mapping() {
- assert_matches!(map_retention_period(None), None);
- assert_matches!(map_retention_period(Some(0)), None);
- assert_matches!(map_retention_period(Some(1)), Some(1));
- assert_matches!(map_retention_period(Some(42)), Some(42));
- assert_matches!(map_retention_period(Some(-1)), Some(-1));
- assert_matches!(map_retention_period(Some(-42)), Some(-42));
+ assert_matches::assert_matches!(map_retention_period(None), Ok(None));
+ assert_matches::assert_matches!(map_retention_period(Some(0)), Ok(None));
+ assert_matches::assert_matches!(map_retention_period(Some(1)), Ok(Some(1)));
+ assert_matches::assert_matches!(map_retention_period(Some(42)), Ok(Some(42)));
+ assert_matches::assert_matches!(map_retention_period(Some(-1)), Err(e) => {
+ assert_eq!(e.code(), Code::InvalidArgument)
+ });
+ assert_matches::assert_matches!(map_retention_period(Some(-42)), Err(e) => {
+ assert_eq!(e.code(), Code::InvalidArgument)
+ });
}
}
|
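A standalone sketch of the Option<i64> mapping introduced above, using a plain String error in place of tonic::Status so it runs without dependencies: 0 and None mean infinite retention, positive values pass through, and negative values are rejected.

// Sketch only: mirrors the match structure of the change above with a String error.
fn map_retention_period(v: Option<i64>) -> Result<Option<i64>, String> {
    match v {
        Some(0) | None => Ok(None),
        Some(v @ 1..) => Ok(Some(v)),
        Some(_) => Err("invalid negative retention period".to_string()),
    }
}

fn main() {
    assert_eq!(map_retention_period(None), Ok(None));
    assert_eq!(map_retention_period(Some(0)), Ok(None));
    assert_eq!(map_retention_period(Some(42)), Ok(Some(42)));
    assert!(map_retention_period(Some(-1)).is_err());
}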
bbd41228bc1e7d846b495d775c571751da7337e3
|
Dom Dwyer
|
2023-01-10 19:33:43
|
persist on shutdown
|
Persist all buffered data when gracefully stopping an ingester2
instance.
This implementation accounts for both late-arriving writes, and
concurrent persist tasks - it's carefully constructed in a way that it
can discover the presence of, and wait for, outstanding persist tasks
started by other code without having to know about all the possible
places a persist task can be started (currently WAL rotation & hot
partition persistence, but later also a RPC endpoint).
There exists a small race that seems so incredibly unlikely to occur
that I didn't cover it off (it would have an RPC write cost for little
gain). This is documented in the code comments.
| null |
feat(ingester2): persist on shutdown
Persist all buffered data when gracefully stopping an ingester2
instance.
This implementation accounts for both late-arriving writes, and
concurrent persist tasks - it's carefully constructed in a way that it
can discover the presence of, and wait for, outstanding persist tasks
started by other code without having to know about all the possible
places a persist task can be started (currently WAL rotation & hot
partition persistence, but later also a RPC endpoint).
There exists a small race that seems so incredibly unlikely to occur
that I didn't cover it off (it would have an RPC write cost for little
gain). This is documented in the code comments.
|
diff --git a/ingester2/src/init.rs b/ingester2/src/init.rs
index 2e2f9eacf2..7582d9d827 100644
--- a/ingester2/src/init.rs
+++ b/ingester2/src/init.rs
@@ -2,11 +2,13 @@ crate::maybe_pub!(
pub use super::wal_replay::*;
);
+mod graceful_shutdown;
mod wal_replay;
use std::{path::PathBuf, sync::Arc, time::Duration};
use backoff::BackoffConfig;
+use futures::{future::Shared, Future, FutureExt};
use generated_types::influxdata::iox::{
catalog::v1::catalog_service_server::{CatalogService, CatalogServiceServer},
ingester::v1::write_service_server::{WriteService, WriteServiceServer},
@@ -14,8 +16,10 @@ use generated_types::influxdata::iox::{
use iox_arrow_flight::flight_service_server::{FlightService, FlightServiceServer};
use iox_catalog::interface::Catalog;
use iox_query::exec::Executor;
+use observability_deps::tracing::*;
use parquet_file::storage::ParquetStorage;
use thiserror::Error;
+use tokio::sync::oneshot;
use wal::Wal;
use crate::{
@@ -33,6 +37,8 @@ use crate::{
TRANSITION_SHARD_INDEX,
};
+use self::graceful_shutdown::graceful_shutdown_handler;
+
/// Acquire opaque handles to the Ingester RPC service implementations.
///
/// This trait serves as the public crate API boundary - callers external to the
@@ -77,18 +83,34 @@ pub struct IngesterGuard<T> {
///
/// Aborted on drop.
rotation_task: tokio::task::JoinHandle<()>,
+
+ /// The task handle executing the graceful shutdown once triggered.
+ graceful_shutdown_handler: tokio::task::JoinHandle<()>,
+ shutdown_complete: Shared<oneshot::Receiver<()>>,
}
-impl<T> IngesterGuard<T> {
+impl<T> IngesterGuard<T>
+where
+ T: Send + Sync,
+{
/// Obtain a handle to the gRPC handlers.
pub fn rpc(&self) -> &T {
&self.rpc
}
+
+ /// Block and wait until the ingester has gracefully stopped.
+ pub async fn join(&self) {
+ self.shutdown_complete
+ .clone()
+ .await
+ .expect("graceful shutdown task panicked")
+ }
}
impl<T> Drop for IngesterGuard<T> {
fn drop(&mut self) {
self.rotation_task.abort();
+ self.graceful_shutdown_handler.abort();
}
}
@@ -148,8 +170,21 @@ pub enum InitError {
/// value should be tuned to be slightly less than the interval between persist
/// operations, but not so long that it causes catalog load spikes at persist
/// time (which can be observed by the catalog instrumentation metrics).
+///
+/// ## Graceful Shutdown
+///
+/// When `shutdown` completes, the ingester blocks ingest (returning an error to
+/// all new write requests) while still executing query requests. The ingester
+/// then persists all data currently buffered.
+///
+/// Callers can wait for this buffer persist to complete by awaiting
+/// [`IngesterGuard::join()`], which will resolve once all data has been flushed
+/// to object storage.
+///
+/// The ingester will continue answering queries until the gRPC server is
+/// stopped by the caller (managed outside of this crate).
#[allow(clippy::too_many_arguments)]
-pub async fn new(
+pub async fn new<F>(
catalog: Arc<dyn Catalog>,
metrics: Arc<metric::Registry>,
persist_background_fetch_time: Duration,
@@ -160,7 +195,11 @@ pub async fn new(
persist_queue_depth: usize,
persist_hot_partition_cost: usize,
object_store: ParquetStorage,
-) -> Result<IngesterGuard<impl IngesterRpcInterface>, InitError> {
+ shutdown: F,
+) -> Result<IngesterGuard<impl IngesterRpcInterface>, InitError>
+where
+ F: Future<Output = ()> + Send + 'static,
+{
// Create the transition shard.
let mut txn = catalog
.start_transaction()
@@ -269,11 +308,11 @@ pub async fn new(
let write_path = WalSink::new(Arc::clone(&buffer), Arc::clone(&wal));
// Spawn a background thread to periodically rotate the WAL segment file.
- let handle = tokio::spawn(periodic_rotation(
- wal,
+ let rotation_task = tokio::spawn(periodic_rotation(
+ Arc::clone(&wal),
wal_rotation_period,
Arc::clone(&buffer),
- persist_handle,
+ Arc::clone(&persist_handle),
));
// Restore the highest sequence number from the WAL files, and default to 0
@@ -288,6 +327,16 @@ pub async fn new(
.unwrap_or(0),
));
+ let (shutdown_tx, shutdown_rx) = oneshot::channel();
+ let shutdown_task = tokio::spawn(graceful_shutdown_handler(
+ shutdown,
+ shutdown_tx,
+ Arc::clone(&ingest_state),
+ Arc::clone(&buffer),
+ persist_handle,
+ wal,
+ ));
+
Ok(IngesterGuard {
rpc: GrpcDelegate::new(
Arc::new(write_path),
@@ -297,6 +346,8 @@ pub async fn new(
catalog,
metrics,
),
- rotation_task: handle,
+ rotation_task,
+ graceful_shutdown_handler: shutdown_task,
+ shutdown_complete: shutdown_rx.shared(),
})
}
diff --git a/ingester2/src/init/graceful_shutdown.rs b/ingester2/src/init/graceful_shutdown.rs
new file mode 100644
index 0000000000..1f72456ce9
--- /dev/null
+++ b/ingester2/src/init/graceful_shutdown.rs
@@ -0,0 +1,372 @@
+use std::{sync::Arc, time::Duration};
+
+use futures::Future;
+use observability_deps::tracing::*;
+use tokio::sync::oneshot;
+
+use crate::{
+ ingest_state::{IngestState, IngestStateError},
+ partition_iter::PartitionIter,
+ persist::{drain_buffer::persist_partitions, queue::PersistQueue},
+};
+
+/// Defines how often the shutdown task polls the partition buffers for
+/// emptiness.
+///
+/// Polls faster in tests to avoid unnecessary delay.
+#[cfg(test)]
+const SHUTDOWN_POLL_INTERVAL: Duration = Duration::from_millis(50);
+#[cfg(not(test))]
+const SHUTDOWN_POLL_INTERVAL: Duration = Duration::from_secs(1);
+
+/// Awaits `fut`, before blocking ingest and persisting all data.
+///
+/// Returns once all outstanding persist jobs have completed (regardless of what
+/// started them) and all buffered data has been flushed to object store.
+///
+/// Correctly accounts for persist jobs that have been started (by a call to
+/// [`PartitionData::mark_persisting()`] but not yet enqueued).
+///
+/// Ingest is blocked by setting [`IngestStateError::GracefulStop`] in the
+/// [`IngestState`].
+///
+/// [`PartitionData::mark_persisting()`]:
+/// crate::buffer_tree::partition::PartitionData::mark_persisting()
+pub(super) async fn graceful_shutdown_handler<F, T, P>(
+ fut: F,
+ complete: oneshot::Sender<()>,
+ ingest_state: Arc<IngestState>,
+ buffer: T,
+ persist: P,
+ wal: Arc<wal::Wal>,
+) where
+ F: Future<Output = ()> + Send,
+ T: PartitionIter + Sync,
+ P: PersistQueue + Clone,
+{
+ fut.await;
+ info!("gracefully stopping ingester");
+
+ // Reject RPC writes.
+ //
+ // There MAY be writes ongoing that started before this state was set.
+ ingest_state.set(IngestStateError::GracefulStop);
+
+ info!("persisting all data before shutdown");
+
+ // Drain the buffer tree, persisting all data.
+ //
+ // Returns once the persist jobs it starts have complete.
+ persist_partitions(buffer.partition_iter(), &persist).await;
+
+ // There may have been concurrent persist jobs started previously by hot
+ // partition persistence or WAL rotation (or some other, arbitrary persist
+ // source) that have not yet completed (this is unlikely). There may also be
+ // late arriving writes that started before ingest was blocked, but did not
+ // buffer until after the persist was completed above (also unlikely).
+ //
+ // Wait until there is no data in the buffer at all before proceeding,
+ // therefore ensuring those concurrent persist operations have completed and
+ // no late arriving data remains buffered.
+ //
+ // NOTE: There is a small race in which a late arriving write starts before
+ // ingest is blocked, is then stalled the entire time partitions are
+ // persisted, remains stalled while this "empty" check occurs, and then
+ // springs to life and buffers in the buffer tree after this check has
+ // completed - I think this is extreme enough to accept as a theoretical
+ // possibility that doesn't need covering off in practice.
+ while buffer
+ .partition_iter()
+ .any(|p| p.lock().get_query_data().is_some())
+ {
+ if persist_partitions(buffer.partition_iter(), &persist).await != 0 {
+ // Late arriving writes needed persisting.
+ debug!("re-persisting late arriving data");
+ } else {
+ // At least one partition is returning data, and there is no data to
+ // start persisting, therefore there is an outstanding persist
+ // operation that hasn't yet been marked as complete.
+ debug!("waiting for concurrent persist to complete");
+ }
+
+ tokio::time::sleep(SHUTDOWN_POLL_INTERVAL).await;
+ }
+
+ // There is now no data buffered in the ingester - all data has been
+ // persisted to object storage.
+ //
+ // Therefore there are no ops that need replaying to rebuild the (now empty)
+ // buffer state, therefore all WAL segments can be deleted to prevent
+ // spurious replay and re-uploading of the same data.
+ //
+ // This should be made redundant by persist-driven WAL dropping:
+ //
+ // https://github.com/influxdata/influxdb_iox/issues/6566
+ //
+ wal.rotate().expect("failed to rotate wal");
+ for file in wal.closed_segments() {
+ if let Err(error) = wal.delete(file.id()).await {
+ // This MAY occur due to concurrent segment deletion driven by the
+ // WAL rotation task.
+ //
+ // If this is a legitimate failure to delete (not a "not found")
+ // then this causes the data to be re-uploaded - an acceptable
+ // outcome, and preferable to panicking here and not dropping the
+ // rest of the deletable files.
+ warn!(%error, "failed to drop WAL segment");
+ }
+ }
+
+ info!("persisted all data - stopping ingester");
+ let _ = complete.send(());
+}
+
+#[cfg(test)]
+mod tests {
+ use std::{future::ready, sync::Arc, task::Poll};
+
+ use assert_matches::assert_matches;
+ use data_types::{NamespaceId, PartitionId, PartitionKey, SequenceNumber, ShardId, TableId};
+ use futures::FutureExt;
+ use lazy_static::lazy_static;
+ use mutable_batch_lp::test_helpers::lp_to_mutable_batch;
+ use parking_lot::Mutex;
+ use test_helpers::timeout::FutureTimeout;
+
+ use crate::{
+ buffer_tree::{
+ namespace::NamespaceName, partition::PartitionData, partition::SortKeyState,
+ table::TableName,
+ },
+ deferred_load::DeferredLoad,
+ persist::queue::mock::MockPersistQueue,
+ };
+
+ use super::*;
+
+ const PARTITION_ID: PartitionId = PartitionId::new(1);
+ const TRANSITION_SHARD_ID: ShardId = ShardId::new(84);
+
+ lazy_static! {
+ static ref PARTITION_KEY: PartitionKey = PartitionKey::from("platanos");
+ static ref TABLE_NAME: TableName = TableName::from("bananas");
+ static ref NAMESPACE_NAME: NamespaceName = NamespaceName::from("namespace-bananas");
+ }
+
+ // Initialise a partition containing buffered data.
+ fn new_partition() -> Arc<Mutex<PartitionData>> {
+ let mut partition = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ NamespaceId::new(3),
+ Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ NAMESPACE_NAME.clone()
+ })),
+ TableId::new(4),
+ Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
+ TABLE_NAME.clone()
+ })),
+ SortKeyState::Provided(None),
+ TRANSITION_SHARD_ID,
+ );
+
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ partition
+ .buffer_write(mb, SequenceNumber::new(1))
+ .expect("failed to write dummy data");
+
+ Arc::new(Mutex::new(partition))
+ }
+
+ // Initialise a WAL with > 1 segment.
+ async fn new_wal() -> (tempfile::TempDir, Arc<wal::Wal>) {
+ let dir = tempfile::tempdir().expect("failed to get temporary WAL directory");
+ let wal = wal::Wal::new(dir.path())
+ .await
+ .expect("failed to initialise WAL to write");
+
+ wal.rotate().expect("failed to rotate WAL");
+
+ (dir, wal)
+ }
+
+ #[tokio::test]
+ async fn test_graceful_shutdown() {
+ let persist = Arc::new(MockPersistQueue::default());
+ let ingest_state = Arc::new(IngestState::default());
+ let (_tempdir, wal) = new_wal().await;
+ let partition = new_partition();
+
+ let (tx, rx) = oneshot::channel();
+ graceful_shutdown_handler(
+ ready(()),
+ tx,
+ ingest_state,
+ vec![Arc::clone(&partition)],
+ Arc::clone(&persist),
+ Arc::clone(&wal),
+ )
+ .await;
+
+ // Wait for the shutdown to complete.
+ rx.with_timeout_panic(Duration::from_secs(5))
+ .await
+ .expect("shutdown task panicked");
+
+ // Assert the data was persisted
+ let persist_calls = persist.calls();
+ assert_matches!(&*persist_calls, [p] => {
+ assert!(Arc::ptr_eq(p, &partition));
+ });
+
+ // Assert there are now no WAL segment files that will be replayed
+ assert!(wal.closed_segments().is_empty());
+ }
+
+ #[tokio::test]
+ async fn test_graceful_shutdown_concurrent_persist() {
+ let persist = Arc::new(MockPersistQueue::default());
+ let ingest_state = Arc::new(IngestState::default());
+ let (_tempdir, wal) = new_wal().await;
+ let partition = new_partition();
+
+ // Mark the partition as persisting
+ let persist_job = partition
+ .lock()
+ .mark_persisting()
+ .expect("non-empty partition should begin persisting");
+
+ // Start the graceful shutdown job in another thread, as it SHOULD block
+ // until the persist job is marked as complete.
+ let (tx, rx) = oneshot::channel();
+ let handle = tokio::spawn(graceful_shutdown_handler(
+ ready(()),
+ tx,
+ ingest_state,
+ vec![Arc::clone(&partition)],
+ Arc::clone(&persist),
+ Arc::clone(&wal),
+ ));
+
+ // Wait a small duration of time for the first buffer emptiness check to
+ // fire.
+ tokio::time::sleep(Duration::from_millis(200)).await;
+
+ // Assert the shutdown hasn't completed.
+ //
+ // This is racy, but will fail false negative and will not flake in CI.
+ // If this fails in CI, it is a legitimate bug (shutdown task should not
+ // have stopped).
+ let rx = rx.shared();
+ assert_matches!(futures::poll!(rx.clone()), Poll::Pending);
+
+ // Mark the persist job as having completed, unblocking the shutdown
+ // task.
+ partition.lock().mark_persisted(persist_job);
+
+ // Wait for the shutdown to complete.
+ rx.with_timeout_panic(Duration::from_secs(5))
+ .await
+ .expect("shutdown task panicked");
+
+ assert!(handle
+ .with_timeout_panic(Duration::from_secs(1))
+ .await
+ .is_ok());
+
+ // Assert the data was not passed to the persist task (it couldn't have
+ // been, as this caller held the PersistData)
+ assert!(persist.calls().is_empty());
+
+ // Assert there are now no WAL segment files that will be replayed
+ assert!(wal.closed_segments().is_empty());
+ }
+
+ /// An implementation of [`PartitionIter`] that yields an extra new,
+ /// non-empty partition each time [`PartitionIter::partition_iter()`] is
+ /// called.
+ #[derive(Debug)]
+ struct SneakyPartitionBuffer {
+ max: usize,
+ partitions: Mutex<Vec<Arc<Mutex<PartitionData>>>>,
+ }
+
+ impl SneakyPartitionBuffer {
+ fn new(max: usize) -> Self {
+ Self {
+ max,
+ partitions: Default::default(),
+ }
+ }
+
+ fn partitions(&self) -> Vec<Arc<Mutex<PartitionData>>> {
+ self.partitions.lock().clone()
+ }
+ }
+
+ impl PartitionIter for SneakyPartitionBuffer {
+ fn partition_iter(&self) -> Box<dyn Iterator<Item = Arc<Mutex<PartitionData>>> + Send> {
+ let mut partitions = self.partitions.lock();
+
+ // If this hasn't reached the maximum number of times to be sneaky,
+ // add another partition.
+ if partitions.len() != self.max {
+ partitions.push(new_partition());
+ }
+
+ Box::new(partitions.clone().into_iter())
+ }
+ }
+
+ #[tokio::test]
+ async fn test_graceful_shutdown_concurrent_new_writes() {
+ let persist = Arc::new(MockPersistQueue::default());
+ let ingest_state = Arc::new(IngestState::default());
+ let (_tempdir, wal) = new_wal().await;
+
+ // Initialise a buffer that keeps yielding more and more newly wrote
+ // data, up until the maximum.
+ const MAX_NEW_PARTITIONS: usize = 3;
+ let buffer = Arc::new(SneakyPartitionBuffer::new(MAX_NEW_PARTITIONS));
+
+ // Start the graceful shutdown job in another thread, as it SHOULD block
+ // until the persist job is marked as complete.
+ let (tx, rx) = oneshot::channel();
+ let handle = tokio::spawn(graceful_shutdown_handler(
+ ready(()),
+ tx,
+ ingest_state,
+ Arc::clone(&buffer),
+ Arc::clone(&persist),
+ Arc::clone(&wal),
+ ));
+
+ // Wait for the shutdown to complete.
+ rx.with_timeout_panic(Duration::from_secs(5))
+ .await
+ .expect("shutdown task panicked");
+
+ assert!(handle
+ .with_timeout_panic(Duration::from_secs(1))
+ .await
+ .is_ok());
+
+ // Assert all the data yielded by the sneaky buffer was passed to the
+ // persist task.
+ let persist_calls = persist.calls();
+ let must_have_persisted = |p: &Arc<Mutex<PartitionData>>| {
+ for call in &persist_calls {
+ if Arc::ptr_eq(call, p) {
+ return true;
+ }
+ }
+ false
+ };
+ if !buffer.partitions().iter().all(must_have_persisted) {
+ panic!("at least one sneaky buffer was not passed to the persist system");
+ }
+
+ // Assert there are now no WAL segment files that will be replayed
+ assert!(wal.closed_segments().is_empty());
+ }
+}
diff --git a/ingester2/src/partition_iter.rs b/ingester2/src/partition_iter.rs
index 2944795770..11c04d2520 100644
--- a/ingester2/src/partition_iter.rs
+++ b/ingester2/src/partition_iter.rs
@@ -18,3 +18,9 @@ where
(**self).partition_iter()
}
}
+
+impl PartitionIter for Vec<Arc<Mutex<PartitionData>>> {
+ fn partition_iter(&self) -> Box<dyn Iterator<Item = Arc<Mutex<PartitionData>>> + Send> {
+ Box::new(self.clone().into_iter())
+ }
+}
diff --git a/ioxd_ingester2/src/lib.rs b/ioxd_ingester2/src/lib.rs
index 940dfe3c19..23584c2bfc 100644
--- a/ioxd_ingester2/src/lib.rs
+++ b/ioxd_ingester2/src/lib.rs
@@ -45,10 +45,11 @@ impl<I: IngesterRpcInterface> IngesterServerType<I> {
metrics: Arc<Registry>,
common_state: &CommonServerState,
max_simultaneous_queries: usize,
+ shutdown: CancellationToken,
) -> Self {
Self {
server,
- shutdown: CancellationToken::new(),
+ shutdown,
metrics,
trace_collector: common_state.trace_collector(),
max_simultaneous_queries,
@@ -101,7 +102,7 @@ impl<I: IngesterRpcInterface + Sync + Send + Debug + 'static> ServerType for Ing
}
async fn join(self: Arc<Self>) {
- self.shutdown.cancelled().await;
+ self.server.join().await;
}
fn shutdown(&self) {
@@ -148,6 +149,8 @@ pub async fn create_ingester_server_type(
exec: Arc<Executor>,
object_store: ParquetStorage,
) -> Result<Arc<dyn ServerType>> {
+ let shutdown = CancellationToken::new();
+
let grpc = ingester2::new(
catalog,
Arc::clone(&metrics),
@@ -159,6 +162,10 @@ pub async fn create_ingester_server_type(
ingester_config.persist_queue_depth,
ingester_config.persist_hot_partition_cost,
object_store,
+ {
+ let shutdown = shutdown.clone();
+ async move { shutdown.cancelled().await }
+ },
)
.await?;
@@ -167,5 +174,6 @@ pub async fn create_ingester_server_type(
metrics,
common_state,
ingester_config.concurrent_query_limit,
+ shutdown,
)))
}
|
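A generic sketch of the shutdown pattern described in the commit above: block new writes, flush what is buffered, then poll until every buffer is observably empty so that concurrently started flushes and late arrivals are also caught. It is not the IOx implementation; the Buffer type, flush behaviour and timings are invented for illustration, and tokio (rt, time and macros features) is assumed.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;

#[derive(Default)]
struct Buffer {
    rows: Mutex<Vec<String>>,
}

impl Buffer {
    fn is_empty(&self) -> bool {
        self.rows.lock().unwrap().is_empty()
    }
    fn flush(&self) {
        // Stand-in for enqueueing and awaiting a real persist job.
        self.rows.lock().unwrap().clear();
    }
}

async fn graceful_shutdown(stop_ingest: Arc<AtomicBool>, buffers: Vec<Arc<Buffer>>) {
    // 1. Reject any new writes from now on.
    stop_ingest.store(true, Ordering::SeqCst);

    // 2. Flush everything currently visible.
    for b in &buffers {
        b.flush();
    }

    // 3. Poll until nothing is buffered, re-flushing late arrivals; this also
    //    waits out flushes that were started elsewhere before shutdown began.
    while buffers.iter().any(|b| !b.is_empty()) {
        for b in &buffers {
            b.flush();
        }
        tokio::time::sleep(Duration::from_millis(50)).await;
    }
}

#[tokio::main]
async fn main() {
    let stop = Arc::new(AtomicBool::new(false));
    let buf = Arc::new(Buffer::default());
    buf.rows.lock().unwrap().push("row".to_string());
    graceful_shutdown(Arc::clone(&stop), vec![Arc::clone(&buf)]).await;
    assert!(buf.is_empty());
}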
b9f7f12c0c699c400d88b67fcf5c7c6f4355e785
|
Dom Dwyer
|
2023-02-22 20:43:57
|
avoid buffer allocation in writer
|
Eliminate buffer allocation (& growing) in the WAL file writer by
reusing a single buffer each time.
This implementation shrinks the buffer size down to 128KiB if it grows
above that amount to prevent one large write from consuming memory
forever more (128KiB should be plenty more than the common write size).
| null |
perf(wal): avoid buffer allocation in writer
Eliminate buffer allocation (& growing) in the WAL file writer by
reusing a single buffer each time.
This implementation shrinks the buffer size down to 128KiB if it grows
above that amount to prevent one large write from consuming memory
forever more (128KiB should be plenty more than the common write size).
|
diff --git a/wal/src/blocking/writer.rs b/wal/src/blocking/writer.rs
index 2f885f40ab..1767fb796d 100644
--- a/wal/src/blocking/writer.rs
+++ b/wal/src/blocking/writer.rs
@@ -13,6 +13,17 @@ use std::{
},
};
+/// Defines the desired maximum size of the re-used write
+/// [`OpenSegmentFileWriter`] buffer.
+///
+/// The buffer is free to exceed this soft limit as necessary, but will always
+/// be shrunk back down to at most this size eventually.
+///
+/// Setting this too low causes needless reallocations for each write that
+/// exceeds it. Setting it too high wastes memory. Configure it to a tolerable
+/// amount of memory overhead for the lifetime of the writer.
+const SOFT_MAX_BUFFER_LEN: usize = 1024 * 128; // 128kiB
+
/// Struct for writing data to a segment file in a wal
#[derive(Debug)]
pub struct OpenSegmentFileWriter {
@@ -20,6 +31,8 @@ pub struct OpenSegmentFileWriter {
path: PathBuf,
f: File,
bytes_written: usize,
+
+ buffer: Vec<u8>,
}
impl OpenSegmentFileWriter {
@@ -53,6 +66,7 @@ impl OpenSegmentFileWriter {
path,
f,
bytes_written,
+ buffer: Vec::with_capacity(8 * 1204), // 8kiB initial size
})
}
@@ -61,28 +75,28 @@ impl OpenSegmentFileWriter {
}
pub fn write(&mut self, data: &[u8]) -> Result<WriteSummary> {
+ // Ensure the write buffer is always empty before using it.
+ self.buffer.clear();
+ // And shrink the buffer below the maximum permitted size should the odd
+ // large batch grow it. This is a NOP if the size is less than
+ // SOFT_MAX_BUFFER_LEN already.
+ self.buffer.shrink_to(SOFT_MAX_BUFFER_LEN);
+
// Only designed to support chunks up to `u32::max` bytes long.
let uncompressed_len = data.len();
u32::try_from(uncompressed_len).context(ChunkSizeTooLargeSnafu {
actual: uncompressed_len,
})?;
- // Allocate a buffer to hold the compressed data + chunk header fields.
- //
- // This allocates another buffer of "data.len()", and because we expect
- // the compressed representation to be smaller, this wastes some memory
- // for a very short period of time, but prevents the need to
- // grow/reallocate the buffer in a latency sensitive part of the code.
- let mut buf = Vec::with_capacity(uncompressed_len);
-
// The chunk header is two u32 values, so write a dummy u64 value and
// come back to fill them in later.
- buf.write_u64::<BigEndian>(0)
+ self.buffer
+ .write_u64::<BigEndian>(0)
.expect("cannot fail to write to buffer");
- // Compress the payload into the buffer, recording the crc hash as it is
- // wrote.
- let mut encoder = snap::write::FrameEncoder::new(HasherWrapper::new(buf));
+ // Compress the payload into the reused buffer, recording the crc hash
+ // as it is wrote.
+ let mut encoder = snap::write::FrameEncoder::new(HasherWrapper::new(&mut self.buffer));
encoder.write_all(data).context(UnableToCompressDataSnafu)?;
let (checksum, buf) = encoder
.into_inner()
@@ -108,7 +122,7 @@ impl OpenSegmentFileWriter {
// Write the entire buffer to the file
let buf = buf.into_inner();
let bytes_written = buf.len();
- self.f.write_all(&buf).context(SegmentWriteDataSnafu)?;
+ self.f.write_all(buf).context(SegmentWriteDataSnafu)?;
// fsync the fd
self.f.sync_all().expect("fsync failure");
|
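A minimal sketch of the reuse-and-shrink pattern described above: a single Vec<u8> is cleared before every write and capped back under a soft maximum with shrink_to, so one oversized write cannot pin memory indefinitely. The Writer type and its stand-in payload handling are invented for illustration; only the clear/shrink_to sequence mirrors the diff.

const SOFT_MAX_BUFFER_LEN: usize = 1024 * 128; // 128 KiB soft cap, as in the diff

struct Writer {
    buffer: Vec<u8>,
}

impl Writer {
    fn new() -> Self {
        Self { buffer: Vec::with_capacity(8 * 1024) } // 8 KiB initial size
    }

    fn write(&mut self, payload: &[u8]) -> usize {
        // Reuse the buffer: empty it, then cap its capacity. shrink_to() is a
        // no-op when capacity is already at or below the soft maximum.
        self.buffer.clear();
        self.buffer.shrink_to(SOFT_MAX_BUFFER_LEN);

        self.buffer.extend_from_slice(payload); // stand-in for header + compression
        self.buffer.len() // pretend this many bytes hit the file
    }
}

fn main() {
    let mut w = Writer::new();
    assert_eq!(w.write(b"hello wal"), 9);
    assert_eq!(w.write(&[0u8; 1024 * 1024]), 1024 * 1024); // grows past the cap once
    assert_eq!(w.write(b"small again"), 11); // next call shrinks capacity back down
}

The actual writer also keeps the chunk-header and CRC bookkeeping shown in the diff; only the buffer lifecycle is modelled here.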
12b8095c46501624c1ccde10dd12d1ef33580043
|
Carol (Nichols || Goulding)
|
2023-08-29 01:57:38
|
Upgrade to Rust 1.72.0 (#8589)
|
* feat: Upgrade to Rust 1.72.0
* fix: Allow a warning about an error we're intentionally creating
This is a test for an error. This lint warns that this code will cause
an error. Thanks lint, that's what we wanted!
* chore: rustfmt 1.72
* fix: Remove unnecessary hashes in raw string literals
Thanks Clippy!
https://rust-lang.github.io/rust-clippy/master/index.html#/needless_raw_string_hashes
Note that there are a number of false negatives with this lint; see
https://github.com/rust-lang/rust-clippy/issues/11420
* fix: Remove unnecessary explicit iteration
Looks like clippy::explicit_iter_loop was improved.
https://rust-lang.github.io/rust-clippy/master/index.html#/explicit_iter_loop
* fix: Allow clippy::manual_try_fold in a few places
Some of these might not be possible to rewrite with try_fold, or at
least not trivially. I don't feel confident enough to change these, in
any case. I think the lint is good to have on for future code though, so
that new code can be written with try_fold.
* fix: Remove useless creation of vectors when an array will do
Mostly in tests. Also fix some long lines.
Thanks Clippy!
https://rust-lang.github.io/rust-clippy/master/index.html#/useless_vec
* fix: Allow a single range in a vec init, which is actually what we want
Looks like Clippy's trying to catch a common mistake here, but for realz
we actually want `Vec<Range<usize>>` not `Vec<usize>`
https://rust-lang.github.io/rust-clippy/master/index.html#/single_range_in_vec_init
* fix: Remove a useless conversion
This looks like removing explicit iteration, but it's actually caught by
useless_conversion.
https://rust-lang.github.io/rust-clippy/master/index.html#/useless_conversion
* fix: Remove redundant pattern matching
Thanks Clippy!
https://rust-lang.github.io/rust-clippy/master/index.html#/redundant_pat
* fix: Allow an unwrap on a literal None in a test
This matches with the other tests better, and also when I tried to
remove the `unwrap_or_default` it changed the JSON sent from something
with an empty value to `null`, so I think the `or_default` part is
actually changing from one `None` to another `None`.
|
https://rust-lang.github.io/rust-clippy/master/index.html#/unnecessary_literal_unwrap
|
feat: Upgrade to Rust 1.72.0 (#8589)
* feat: Upgrade to Rust 1.72.0
* fix: Allow a warning about an error we're intentionally creating
This is a test for an error. This lint warns that this code will cause
an error. Thanks lint, that's what we wanted!
* chore: rustfmt 1.72
* fix: Remove unnecessary hashes in raw string literals
Thanks Clippy!
https://rust-lang.github.io/rust-clippy/master/index.html#/needless_raw_string_hashes
Note that there are a number of false negatives with this lint; see
https://github.com/rust-lang/rust-clippy/issues/11420
* fix: Remove unnecessary explicit iteration
Looks like clippy::explicit_iter_loop was improved.
https://rust-lang.github.io/rust-clippy/master/index.html#/explicit_iter_loop
* fix: Allow clippy::manual_try_fold in a few places
Some of these might not be possible to rewrite with try_fold, or at
least not trivially. I don't feel confident enough to change these, in
any case. I think the lint is good to have on for future code though, so
that new code can be written with try_fold.
* fix: Remove useless creation of vectors when an array will do
Mostly in tests. Also fix some long lines.
Thanks Clippy!
https://rust-lang.github.io/rust-clippy/master/index.html#/useless_vec
* fix: Allow a single range in a vec init, which is actually what we want
Looks like Clippy's trying to catch a common mistake here, but for realz
we actually want `Vec<Range<usize>>` not `Vec<usize>`
https://rust-lang.github.io/rust-clippy/master/index.html#/single_range_in_vec_init
* fix: Remove a useless conversion
This looks like removing explicit iteration, but it's actually caught by
useless_conversion.
https://rust-lang.github.io/rust-clippy/master/index.html#/useless_conversion
* fix: Remove redundant pattern matching
Thanks Clippy!
https://rust-lang.github.io/rust-clippy/master/index.html#/redundant_pat
* fix: Allow an unwrap on a literal None in a test
This matches with the other tests better, and also when I tried to
remove the `unwrap_or_default` it changed the JSON sent from something
with an empty value to `null`, so I think the `or_default` part is
actually changing from one `None` to another `None`.
https://rust-lang.github.io/rust-clippy/master/index.html#/unnecessary_literal_unwrap
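A small standalone illustration, not taken from the diff below, of two of the lint fixes listed above (needless_raw_string_hashes and useless_vec), showing the before and after forms side by side:

fn main() {
    // needless_raw_string_hashes: the hashes are only needed when the literal
    // itself contains a double quote.
    let with_hashes = r#"\s+\|"#;
    let without_hashes = r"\s+\|";
    assert_eq!(with_hashes, without_hashes);

    // useless_vec: a fixed collection that is only read can be a plain array.
    let sizes_vec = vec![53, 45, 5];
    let sizes_arr = [53, 45, 5];
    assert_eq!(sizes_vec.iter().sum::<i64>(), sizes_arr.iter().sum::<i64>());
}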
|
diff --git a/arrow_util/src/test_util.rs b/arrow_util/src/test_util.rs
index 5e788fa73a..17e80f88c4 100644
--- a/arrow_util/src/test_util.rs
+++ b/arrow_util/src/test_util.rs
@@ -221,15 +221,15 @@ static REGEX_LINESEP: Lazy<Regex> = Lazy::new(|| Regex::new(r#"[+-]{6,}"#).expec
///
/// ` |` -> ` |`
/// ` |` -> ` |`
-static REGEX_COL: Lazy<Regex> = Lazy::new(|| Regex::new(r#"\s+\|"#).expect("col regex"));
+static REGEX_COL: Lazy<Regex> = Lazy::new(|| Regex::new(r"\s+\|").expect("col regex"));
/// Matches line like `metrics=[foo=1, bar=2]`
static REGEX_METRICS: Lazy<Regex> =
- Lazy::new(|| Regex::new(r#"metrics=\[([^\]]*)\]"#).expect("metrics regex"));
+ Lazy::new(|| Regex::new(r"metrics=\[([^\]]*)\]").expect("metrics regex"));
/// Matches things like `1s`, `1.2ms` and `10.2μs`
static REGEX_TIMING: Lazy<Regex> =
- Lazy::new(|| Regex::new(r#"[0-9]+(\.[0-9]+)?.s"#).expect("timing regex"));
+ Lazy::new(|| Regex::new(r"[0-9]+(\.[0-9]+)?.s").expect("timing regex"));
/// Matches things like `FilterExec: .*` and `ParquetExec: .*`
///
diff --git a/compactor/src/components/split_or_compact/files_to_compact.rs b/compactor/src/components/split_or_compact/files_to_compact.rs
index c53762350f..8885ef78cb 100644
--- a/compactor/src/components/split_or_compact/files_to_compact.rs
+++ b/compactor/src/components/split_or_compact/files_to_compact.rs
@@ -264,7 +264,7 @@ pub fn limit_files_to_compact(
// All files in start_level_files_to_compact and target_level_files_to_compact will be compacted
let files_to_compact = start_level_files_to_compact
.into_iter()
- .chain(target_level_files_to_compact.into_iter())
+ .chain(target_level_files_to_compact)
.collect::<Vec<_>>();
// Sanity check
diff --git a/compactor/src/components/split_or_compact/start_level_files_to_split.rs b/compactor/src/components/split_or_compact/start_level_files_to_split.rs
index 0b285b06c6..5906b60804 100644
--- a/compactor/src/components/split_or_compact/start_level_files_to_split.rs
+++ b/compactor/src/components/split_or_compact/start_level_files_to_split.rs
@@ -338,7 +338,7 @@ pub fn merge_small_l0_chains(
// TODO: this may not be necessary long term (with CompactRanges this might be ok)
let mut matches = 0;
if prior_chain_bytes > 0 {
- for f in chain.iter() {
+ for f in chain {
for f2 in &merged_chains[prior_chain_idx as usize] {
if f.max_l0_created_at == f2.max_l0_created_at {
matches += 1;
diff --git a/compactor/tests/layouts/large_files.rs b/compactor/tests/layouts/large_files.rs
index 96d849dc5c..554307e01c 100644
--- a/compactor/tests/layouts/large_files.rs
+++ b/compactor/tests/layouts/large_files.rs
@@ -723,7 +723,7 @@ async fn target_too_large_1() {
// . one very large overlapped L2
// size of l1s & l2
- let l1_sizes = vec![53 * ONE_MB, 45 * ONE_MB, 5 * ONE_MB];
+ let l1_sizes = [53 * ONE_MB, 45 * ONE_MB, 5 * ONE_MB];
let l2_size = 253 * ONE_MB;
// L2 overlapped with the first L1
@@ -836,7 +836,7 @@ async fn target_too_large_2() {
// . one very large overlapped L2
// size of l1s & l2
- let l1_sizes = vec![69 * ONE_MB, 50 * ONE_MB];
+ let l1_sizes = [69 * ONE_MB, 50 * ONE_MB];
let l2_size = 232 * ONE_MB;
// L2 overlapped with both L1s
@@ -943,7 +943,7 @@ async fn start_too_large_similar_time_range() {
// . total size = L1 & L2 > max_compact_size
// size of l1 & l2 respectively
- let sizes = vec![250 * ONE_MB, 52 * ONE_MB];
+ let sizes = [250 * ONE_MB, 52 * ONE_MB];
for i in 1..=2 {
setup
@@ -1057,7 +1057,7 @@ async fn start_too_large_small_time_range() {
// . total size = L1 & L2 > max_compact_size
// size of l1 & l2 respectively
- let sizes = vec![250 * ONE_MB, 52 * ONE_MB];
+ let sizes = [250 * ONE_MB, 52 * ONE_MB];
for i in 1..=2 {
setup
@@ -1139,7 +1139,7 @@ async fn start_too_large_small_time_range_2() {
// . total size = L1 & L2 > max_compact_size
// size of l1 & l2 respectively
- let sizes = vec![250 * ONE_MB, 52 * ONE_MB];
+ let sizes = [250 * ONE_MB, 52 * ONE_MB];
for i in 1..=2 {
setup
@@ -1220,7 +1220,7 @@ async fn start_too_large_small_time_range_3() {
// . total size = L1 & L2 > max_compact_size
// size of l1 & l2 respectively
- let sizes = vec![250 * ONE_MB, 52 * ONE_MB];
+ let sizes = [250 * ONE_MB, 52 * ONE_MB];
for i in 1..=2 {
setup
diff --git a/compactor/tests/layouts/mod.rs b/compactor/tests/layouts/mod.rs
index a30eb9c391..02572887bc 100644
--- a/compactor/tests/layouts/mod.rs
+++ b/compactor/tests/layouts/mod.rs
@@ -154,7 +154,7 @@ pub(crate) async fn run_layout_scenario(setup: &TestSetup) -> Vec<String> {
]);
let mut breakdown = Vec::new();
- for (op, written) in setup.bytes_written_per_plan.lock().unwrap().iter() {
+ for (op, written) in &*setup.bytes_written_per_plan.lock().unwrap() {
let written = *written as i64;
breakdown.push(format!("{} written by {}", display_size(written), op));
}
diff --git a/compactor_test_utils/src/display.rs b/compactor_test_utils/src/display.rs
index c3d0c1273c..77bed6157c 100644
--- a/compactor_test_utils/src/display.rs
+++ b/compactor_test_utils/src/display.rs
@@ -117,7 +117,7 @@ pub fn format_files_split<P: ParquetFileInfo>(
let strings1 = readable_list_of_files(Some(title1.into()), files1);
let strings2 = readable_list_of_files(Some(title2.into()), files2);
- strings1.into_iter().chain(strings2.into_iter()).collect()
+ strings1.into_iter().chain(strings2).collect()
}
/// default width for printing
diff --git a/compactor_test_utils/src/lib.rs b/compactor_test_utils/src/lib.rs
index 0ac1920fc7..d2c4847c99 100644
--- a/compactor_test_utils/src/lib.rs
+++ b/compactor_test_utils/src/lib.rs
@@ -194,7 +194,7 @@ impl TestSetupBuilder<false> {
let time_5_minutes_future = time_provider.minutes_into_future(5);
// L1 file
- let lp = vec![
+ let lp = [
"table,tag2=PA,tag3=15 field_int=1601i 30000",
"table,tag2=OH,tag3=21 field_int=21i 36000", // will be eliminated due to duplicate
]
@@ -209,7 +209,7 @@ impl TestSetupBuilder<false> {
let level_1_file_1_minute_ago = self.partition.create_parquet_file(builder).await.into();
// L0 file
- let lp = vec![
+ let lp = [
"table,tag1=WA field_int=1000i 8000", // will be eliminated due to duplicate
"table,tag1=VT field_int=10i 10000", // latest L0 compared with duplicate in level_1_file_1_minute_ago_with_duplicates
// keep it
@@ -226,7 +226,7 @@ impl TestSetupBuilder<false> {
let level_0_file_16_minutes_ago = self.partition.create_parquet_file(builder).await.into();
// L0 file
- let lp = vec![
+ let lp = [
"table,tag1=WA field_int=1500i 8000", // latest duplicate and kept
"table,tag1=VT field_int=10i 6000",
"table,tag1=UT field_int=270i 25000",
@@ -242,7 +242,7 @@ impl TestSetupBuilder<false> {
let level_0_file_5_minutes_ago = self.partition.create_parquet_file(builder).await.into();
// L1 file
- let lp = vec![
+ let lp = [
"table,tag1=VT field_int=88i 10000", // will be eliminated due to duplicate.
// Note: created time more recent than level_0_file_16_minutes_ago
// but always considered older ingested data
@@ -260,7 +260,7 @@ impl TestSetupBuilder<false> {
self.partition.create_parquet_file(builder).await.into();
// L0 file
- let lp = vec!["table,tag2=OH,tag3=21 field_int=22i 36000"].join("\n");
+ let lp = ["table,tag2=OH,tag3=21 field_int=22i 36000"].join("\n");
let builder = TestParquetFileBuilder::default()
.with_line_protocol(&lp)
.with_min_time(0)
@@ -273,7 +273,7 @@ impl TestSetupBuilder<false> {
let medium_level_0_file_time_now = self.partition.create_parquet_file(builder).await.into();
// L0 file
- let lp = vec![
+ let lp = [
"table,tag1=VT field_int=10i 68000",
"table,tag2=OH,tag3=21 field_int=210i 136000",
]
@@ -403,7 +403,7 @@ impl TestSetupBuilder<false> {
/// Create 3 L2 files
pub async fn create_three_l2_files(&self, time: TestTimes) -> Vec<ParquetFile> {
// L2.1 file
- let lp = vec![
+ let lp = [
"table,tag1=WA field_int=1000i 8000", // will be eliminated due to duplicate
"table,tag1=VT field_int=88i 10000", // will be eliminated due to duplicate.
"table,tag1=OR field_int=99i 12000",
@@ -419,7 +419,7 @@ impl TestSetupBuilder<false> {
let l2_1 = self.partition.create_parquet_file(builder).await.into();
// L2.2 file
- let lp = vec![
+ let lp = [
"table,tag1=UT field_int=70i 20000",
"table,tag2=PA,tag3=15 field_int=1601i 30000",
]
@@ -434,7 +434,7 @@ impl TestSetupBuilder<false> {
let l2_2 = self.partition.create_parquet_file(builder).await.into();
// L2.3 file
- let lp = vec!["table,tag2=OH,tag3=21 field_int=21i 36000"].join("\n");
+ let lp = ["table,tag2=OH,tag3=21 field_int=21i 36000"].join("\n");
let builder = TestParquetFileBuilder::default()
.with_line_protocol(&lp)
.with_creation_time(Time::from_timestamp_nanos(time.time_3_minutes_future))
@@ -455,7 +455,7 @@ impl TestSetupBuilder<false> {
time: TestTimes,
) -> Vec<ParquetFile> {
// L1.1 file
- let lp = vec![
+ let lp = [
"table,tag1=WA field_int=1500i 8000", // latest duplicate and kept
"table,tag1=VT field_int=10i 10000", // latest duplicate and kept
"table,tag1=VT field_int=10i 6000",
@@ -474,7 +474,7 @@ impl TestSetupBuilder<false> {
let l1_1 = self.partition.create_parquet_file(builder).await.into();
// L1.2 file
- let lp = vec!["table,tag2=OH,tag3=21 field_int=210i 136000"].join("\n");
+ let lp = ["table,tag2=OH,tag3=21 field_int=210i 136000"].join("\n");
let builder = TestParquetFileBuilder::default()
.with_line_protocol(&lp)
.with_min_time(136000)
@@ -494,7 +494,7 @@ impl TestSetupBuilder<false> {
time: TestTimes,
) -> Vec<ParquetFile> {
// L1.1 file
- let lp = vec![
+ let lp = [
"table,tag1=WA field_int=1500i 8000", // latest duplicate and kept
"table,tag1=VT field_int=10i 10000", // latest duplicate and kept
"table,tag1=VT field_int=10i 6000",
@@ -511,7 +511,7 @@ impl TestSetupBuilder<false> {
let l1_1 = self.partition.create_parquet_file(builder).await.into();
// L1.2 file
- let lp = vec![
+ let lp = [
"table,tag2=PA,tag3=15 field_int=1601i 28000",
"table,tag1=VT field_int=10i 68000",
"table,tag2=OH,tag3=21 field_int=210i 136000",
diff --git a/compactor_test_utils/src/simulator.rs b/compactor_test_utils/src/simulator.rs
index a39922ed47..022c03970c 100644
--- a/compactor_test_utils/src/simulator.rs
+++ b/compactor_test_utils/src/simulator.rs
@@ -268,7 +268,7 @@ impl SimulatedRun {
// hook up inputs and outputs
format_files(input_title, &input_parquet_files)
.into_iter()
- .chain(format_files(output_title, &output_params).into_iter())
+ .chain(format_files(output_title, &output_params))
}
}
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index 8f87310239..e002581ac7 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -870,7 +870,7 @@ impl std::fmt::Display for DeleteExpr {
write!(
f,
r#""{}"{}{}"#,
- self.column().replace('\\', r#"\\"#).replace('"', r#"\""#),
+ self.column().replace('\\', r"\\").replace('"', r#"\""#),
self.op(),
self.scalar(),
)
@@ -931,11 +931,7 @@ impl std::fmt::Display for Scalar {
_ => write!(f, "{:?}", value.as_ref()),
},
Scalar::String(value) => {
- write!(
- f,
- "'{}'",
- value.replace('\\', r#"\\"#).replace('\'', r#"\'"#),
- )
+ write!(f, "'{}'", value.replace('\\', r"\\").replace('\'', r"\'"))
}
}
}
@@ -1700,7 +1696,7 @@ mod tests {
scalar: Scalar::I64(1),
},
DeleteExpr {
- column: String::from(r#"col\2"#),
+ column: String::from(r"col\2"),
op: Op::Eq,
scalar: Scalar::I64(2),
},
@@ -1836,7 +1832,7 @@ mod tests {
DeleteExpr {
column: String::from("col3"),
op: Op::Eq,
- scalar: Scalar::String(String::from(r#"fo\o"#)),
+ scalar: Scalar::String(String::from(r"fo\o")),
},
DeleteExpr {
column: String::from("col4"),
@@ -2571,15 +2567,12 @@ mod tests {
let schema2 = TableSchema {
id: TableId::new(2),
partition_template: Default::default(),
- columns: ColumnsByName::new(
- [Column {
- id: ColumnId::new(1),
- table_id: TableId::new(2),
- name: String::from("foo"),
- column_type: ColumnType::Bool,
- }]
- .into_iter(),
- ),
+ columns: ColumnsByName::new([Column {
+ id: ColumnId::new(1),
+ table_id: TableId::new(2),
+ name: String::from("foo"),
+ column_type: ColumnType::Bool,
+ }]),
};
assert!(schema1.size() < schema2.size());
}
diff --git a/data_types/src/partition.rs b/data_types/src/partition.rs
index f1e383aaba..b1d8750649 100644
--- a/data_types/src/partition.rs
+++ b/data_types/src/partition.rs
@@ -280,7 +280,7 @@ pub struct PartitionHashId(Arc<[u8; PARTITION_HASH_ID_SIZE_BYTES]>);
impl std::fmt::Display for PartitionHashId {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- for byte in self.0.iter() {
+ for byte in &*self.0 {
write!(f, "{:02x}", byte)?;
}
Ok(())
diff --git a/flightsql/src/planner.rs b/flightsql/src/planner.rs
index b30aefd6ef..0c37a6806d 100644
--- a/flightsql/src/planner.rs
+++ b/flightsql/src/planner.rs
@@ -350,7 +350,7 @@ async fn plan_get_db_schemas(
// we just got the catalog name from the catalog_list, so it
// should always be Some, but avoid unwrap to be safe
let Some(catalog) = catalog_list.catalog(&catalog_name) else {
- continue
+ continue;
};
builder.append(&catalog_name, "information_schema");
@@ -402,7 +402,7 @@ async fn plan_get_tables(ctx: &IOxSessionContext, cmd: CommandGetTables) -> Resu
// we just got the catalog name from the catalog_list, so it
// should always be Some, but avoid unwrap to be safe
let Some(catalog) = catalog_list.catalog(&catalog_name) else {
- continue
+ continue;
};
// special case the "public"."information_schema" as it is a
@@ -425,12 +425,12 @@ async fn plan_get_tables(ctx: &IOxSessionContext, cmd: CommandGetTables) -> Resu
for schema_name in catalog.schema_names() {
let Some(schema) = catalog.schema(&schema_name) else {
- continue
+ continue;
};
for table_name in schema.table_names() {
let Some(table) = schema.table(&table_name).await else {
- continue
+ continue;
};
let table_type = table_type_name(table.table_type());
diff --git a/garbage_collector/src/objectstore/checker.rs b/garbage_collector/src/objectstore/checker.rs
index 0a91f0f6a6..85cbcbbdba 100644
--- a/garbage_collector/src/objectstore/checker.rs
+++ b/garbage_collector/src/objectstore/checker.rs
@@ -129,7 +129,7 @@ async fn should_delete(
}
let file_name = candidate.location.parts().last();
- if matches!(file_name, None) {
+ if file_name.is_none() {
warn!(
location = %candidate.location,
deleting = true,
diff --git a/import_export/src/file/import.rs b/import_export/src/file/import.rs
index 09d66b76d3..cf36f798f4 100644
--- a/import_export/src/file/import.rs
+++ b/import_export/src/file/import.rs
@@ -318,9 +318,9 @@ impl RemoteImporter {
let bytes = Bytes::from(file_bytes);
let file_size_bytes = bytes.len();
- let Some(iox_parquet_metadata) = IoxParquetMetaData::from_file_bytes(bytes.clone())? else {
+ let Some(iox_parquet_metadata) = IoxParquetMetaData::from_file_bytes(bytes.clone())? else {
return Err(Error::ParquetMetadataNotFound {
- path: PathBuf::from(file_path)
+ path: PathBuf::from(file_path),
});
};
@@ -699,21 +699,21 @@ fn get_min_max_times(
let schema = decoded_iox_parquet_metadata.read_schema()?;
let stats = decoded_iox_parquet_metadata.read_statistics(&schema)?;
- let Some(summary) = stats
- .iter()
- .find(|s| s.name == schema::TIME_COLUMN_NAME) else {
- return Err(Error::BadStats { stats: None });
- };
+ let Some(summary) = stats.iter().find(|s| s.name == schema::TIME_COLUMN_NAME) else {
+ return Err(Error::BadStats { stats: None });
+ };
let Statistics::I64(stats) = &summary.stats else {
- return Err(Error::BadStats { stats: Some(summary.stats.clone()) });
+ return Err(Error::BadStats {
+ stats: Some(summary.stats.clone()),
+ });
};
let (Some(min), Some(max)) = (stats.min, stats.max) else {
- return Err(Error::NoMinMax {
+ return Err(Error::NoMinMax {
min: stats.min,
max: stats.max,
- })
+ });
};
Ok((Timestamp::new(min), Timestamp::new(max)))
diff --git a/influxdb2_client/src/api/query.rs b/influxdb2_client/src/api/query.rs
index 33e60fcdf6..85e8ee6f47 100644
--- a/influxdb2_client/src/api/query.rs
+++ b/influxdb2_client/src/api/query.rs
@@ -236,6 +236,7 @@ mod tests {
.match_header("Content-Type", "application/json")
.match_query(Matcher::UrlEncoded("org".into(), org.into()))
.match_body(
+ #[allow(clippy::unnecessary_literal_unwrap)]
serde_json::to_string(&query.unwrap_or_default())
.unwrap()
.as_str(),
diff --git a/influxdb_influxql_parser/src/string.rs b/influxdb_influxql_parser/src/string.rs
index 7b586ece62..a8a065d173 100644
--- a/influxdb_influxql_parser/src/string.rs
+++ b/influxdb_influxql_parser/src/string.rs
@@ -66,7 +66,7 @@ pub(crate) fn single_quoted_string(i: &str) -> ParseResult<&str, String> {
let escaped = preceded(
char('\\'),
expect(
- r#"invalid escape sequence, expected \\, \' or \n"#,
+ r"invalid escape sequence, expected \\, \' or \n",
alt((char('\\'), char('\''), value('\n', char('n')))),
),
);
@@ -277,10 +277,10 @@ mod test {
);
// escaped characters
- let (_, got) = single_quoted_string(r#"'\n\''"#).unwrap();
+ let (_, got) = single_quoted_string(r"'\n\''").unwrap();
assert_eq!(got, "\n'");
- let (_, got) = single_quoted_string(r#"'\'hello\''"#).unwrap();
+ let (_, got) = single_quoted_string(r"'\'hello\''").unwrap();
assert_eq!(got, "'hello'");
// literal tab
@@ -307,8 +307,8 @@ mod test {
// Invalid escape
assert_expect_error!(
- single_quoted_string(r#"'quick\idraw'"#),
- r#"invalid escape sequence, expected \\, \' or \n"#
+ single_quoted_string(r"'quick\idraw'"),
+ r"invalid escape sequence, expected \\, \' or \n"
);
}
@@ -318,15 +318,15 @@ mod test {
assert_eq!(got, "hello".into());
// handle escaped delimiters "\/"
- let (_, got) = regex(r#"/\/this\/is\/a\/path/"#).unwrap();
+ let (_, got) = regex(r"/\/this\/is\/a\/path/").unwrap();
assert_eq!(got, "/this/is/a/path".into());
// ignores any other possible escape sequence
- let (_, got) = regex(r#"/hello\n/"#).unwrap();
+ let (_, got) = regex(r"/hello\n/").unwrap();
assert_eq!(got, "hello\\n".into());
// can parse possible escape sequence at beginning of regex
- let (_, got) = regex(r#"/\w.*/"#).unwrap();
+ let (_, got) = regex(r"/\w.*/").unwrap();
assert_eq!(got, "\\w.*".into());
// Empty regex
@@ -344,6 +344,6 @@ mod test {
// Single backslash fails, which matches Go implementation
// See: https://go.dev/play/p/_8J1v5-382G
- assert_expect_error!(regex(r#"/\/"#), "unterminated regex literal");
+ assert_expect_error!(regex(r"/\/"), "unterminated regex literal");
}
}
diff --git a/influxdb_influxql_parser/src/time_range.rs b/influxdb_influxql_parser/src/time_range.rs
index 6bde536c16..a1f105df18 100644
--- a/influxdb_influxql_parser/src/time_range.rs
+++ b/influxdb_influxql_parser/src/time_range.rs
@@ -242,7 +242,7 @@ pub fn split_cond(
};
let Some(expr) = expr.expr() else {
- return ControlFlow::Break(error::map::internal("expected Expr"))
+ return ControlFlow::Break(error::map::internal("expected Expr"));
};
// simplify binary expressions to a constant, including resolve `now()`
@@ -296,13 +296,11 @@ pub fn split_cond(
op: op @ (And | Or),
..
}) => {
- let Some(right) = stack
- .pop() else {
- return ControlFlow::Break(error::map::internal("invalid expr stack"))
+ let Some(right) = stack.pop() else {
+ return ControlFlow::Break(error::map::internal("invalid expr stack"));
};
- let Some(left) = stack
- .pop() else {
- return ControlFlow::Break(error::map::internal("invalid expr stack"))
+ let Some(left) = stack.pop() else {
+ return ControlFlow::Break(error::map::internal("invalid expr stack"));
};
stack.push(match (left, right) {
(Some(left), Some(right)) => Some(CE::Binary(ConditionalBinary {
diff --git a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs
index 57f5a61567..76445dff9e 100644
--- a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs
+++ b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs
@@ -780,7 +780,7 @@ async fn tag_regex_escaped_predicates() {
// FROM db0.rp0.status_code
// WHERE url =~ /https\:\/\/influxdb\.com/
// ```
- .regex_match_predicate("url", r#"https\://influxdb\.com"#),
+ .regex_match_predicate("url", r"https\://influxdb\.com"),
expected_results: vec![
// expect one series with influxdb.com
"SeriesFrame, tags: _field=value,_measurement=status_code,url=https://influxdb.com, \
@@ -804,7 +804,7 @@ async fn tag_not_match_regex_escaped_predicates() {
// FROM db0.rp0.status_code
// WHERE url !~ /https\:\/\/influxdb\.com/
// ```
- .not_regex_match_predicate("url", r#"https\://influxdb\.com"#),
+ .not_regex_match_predicate("url", r"https\://influxdb\.com"),
expected_results: vec![
// expect one series with example.com
"SeriesFrame, tags: _field=value,_measurement=status_code,url=http://www.example.com, \
diff --git a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_group.rs b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_group.rs
index 230e11edea..4dccbf0d27 100644
--- a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_group.rs
+++ b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_group.rs
@@ -1146,7 +1146,7 @@ impl InfluxRpcTest for ReadGroupTest {
.clone()
.source(cluster)
.aggregate_type(self.aggregate_type)
- .group_keys(self.group_keys.clone().into_iter())
+ .group_keys(self.group_keys.clone())
.group(Group::By)
.build_read_group();
diff --git a/influxdb_iox_client/src/format/influxql.rs b/influxdb_iox_client/src/format/influxql.rs
index 52d4504806..3a1b15012a 100644
--- a/influxdb_iox_client/src/format/influxql.rs
+++ b/influxdb_iox_client/src/format/influxql.rs
@@ -63,7 +63,9 @@ impl Options {
pub fn write_columnar(mut w: impl Write, batches: &[RecordBatch], options: Options) -> Result<()> {
let arrow_opts = arrow::util::display::FormatOptions::default().with_display_error(true);
- let Some(schema) = batches.first().map(|b|b.schema()) else { return Ok(()) };
+ let Some(schema) = batches.first().map(|b| b.schema()) else {
+ return Ok(());
+ };
let md = schema
.metadata()
.get(schema::INFLUXQL_METADATA_KEY)
diff --git a/influxdb_line_protocol/src/builder.rs b/influxdb_line_protocol/src/builder.rs
index e7c03ad674..5be9df81ff 100644
--- a/influxdb_line_protocol/src/builder.rs
+++ b/influxdb_line_protocol/src/builder.rs
@@ -340,7 +340,7 @@ mod tests {
r#""foo""#
);
assert_eq!(
- format!("\"{}\"", escape(r#"foo \ bar"#, DOUBLE_QUOTE)),
+ format!("\"{}\"", escape(r"foo \ bar", DOUBLE_QUOTE)),
r#""foo \\ bar""#
);
assert_eq!(
@@ -361,7 +361,7 @@ mod tests {
const WITH_EQ: &str = "with=eq";
const WITH_DOUBLE_QUOTE: &str = r#"with"doublequote"#;
const WITH_SINGLE_QUOTE: &str = "with'singlequote";
- const WITH_BACKSLASH: &str = r#"with\ backslash"#;
+ const WITH_BACKSLASH: &str = r"with\ backslash";
let builder = LineProtocolBuilder::new()
// line 0
diff --git a/influxdb_line_protocol/src/lib.rs b/influxdb_line_protocol/src/lib.rs
index 09b1e65430..fe3e7ad103 100644
--- a/influxdb_line_protocol/src/lib.rs
+++ b/influxdb_line_protocol/src/lib.rs
@@ -797,8 +797,8 @@ fn field_string_value(i: &str) -> IResult<&str, EscapedStr<'_>> {
// quotes.
let string_data = alt((
map(tag(r#"\""#), |_| r#"""#), // escaped double quote -> double quote
- map(tag(r#"\\"#), |_| r#"\"#), // escaped backslash --> single backslash
- tag(r#"\"#), // unescaped single backslash
+ map(tag(r"\\"), |_| r"\"), // escaped backslash --> single backslash
+ tag(r"\"), // unescaped single backslash
take_while1(|c| c != '\\' && c != '"'), // anything else w/ no special handling
));
@@ -1317,11 +1317,11 @@ mod test {
fn parse_measurement_with_eq() {
let input = "tag1=1 field=1 1234";
let vals = parse(input);
- assert!(matches!(vals, Ok(_)));
+ assert!(vals.is_ok());
let input = "tag1=1,tag2=2 value=1 123";
let vals = parse(input);
- assert!(matches!(vals, Ok(_)));
+ assert!(vals.is_ok());
}
#[test]
@@ -1447,11 +1447,11 @@ mod test {
// Examples from
// https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/#special-characters
(r#"foo asdf="too hot/cold""#, r#"too hot/cold"#),
- (r#"foo asdf="too hot\cold""#, r#"too hot\cold"#),
- (r#"foo asdf="too hot\\cold""#, r#"too hot\cold"#),
- (r#"foo asdf="too hot\\\cold""#, r#"too hot\\cold"#),
- (r#"foo asdf="too hot\\\\cold""#, r#"too hot\\cold"#),
- (r#"foo asdf="too hot\\\\\cold""#, r#"too hot\\\cold"#),
+ (r#"foo asdf="too hot\cold""#, r"too hot\cold"),
+ (r#"foo asdf="too hot\\cold""#, r"too hot\cold"),
+ (r#"foo asdf="too hot\\\cold""#, r"too hot\\cold"),
+ (r#"foo asdf="too hot\\\\cold""#, r"too hot\\cold"),
+ (r#"foo asdf="too hot\\\\\cold""#, r"too hot\\\cold"),
];
for (input, expected_parsed_string_value) in test_data {
@@ -1900,30 +1900,30 @@ bar value2=2i 123"#;
#[test]
fn measurement_allows_escaping_comma() {
- assert_fully_parsed!(measurement(r#"wea\,ther"#), r#"wea,ther"#);
+ assert_fully_parsed!(measurement(r"wea\,ther"), r#"wea,ther"#);
}
#[test]
fn measurement_allows_escaping_space() {
- assert_fully_parsed!(measurement(r#"wea\ ther"#), r#"wea ther"#);
+ assert_fully_parsed!(measurement(r"wea\ ther"), r#"wea ther"#);
}
#[test]
fn measurement_allows_escaping_backslash() {
- assert_fully_parsed!(measurement(r#"\\wea\\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(measurement(r"\\wea\\ther"), r"\wea\ther");
}
#[test]
fn measurement_allows_backslash_with_unknown_escape() {
- assert_fully_parsed!(measurement(r#"\wea\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(measurement(r"\wea\ther"), r"\wea\ther");
}
#[test]
fn measurement_allows_literal_newline_as_unknown_escape() {
assert_fully_parsed!(
measurement(
- r#"weat\
-her"#
+ r"weat\
+her"
),
"weat\\\nher",
);
@@ -1942,7 +1942,7 @@ her"#,
#[test]
fn measurement_disallows_ending_in_backslash() {
- let parsed = measurement(r#"weather\"#);
+ let parsed = measurement(r"weather\");
assert!(matches!(
parsed,
Err(nom::Err::Failure(super::Error::EndsWithBackslash))
@@ -1951,35 +1951,35 @@ her"#,
#[test]
fn tag_key_allows_escaping_comma() {
- assert_fully_parsed!(tag_key(r#"wea\,ther"#), r#"wea,ther"#);
+ assert_fully_parsed!(tag_key(r"wea\,ther"), r#"wea,ther"#);
}
#[test]
fn tag_key_allows_escaping_equal() {
- assert_fully_parsed!(tag_key(r#"wea\=ther"#), r#"wea=ther"#);
+ assert_fully_parsed!(tag_key(r"wea\=ther"), r#"wea=ther"#);
}
#[test]
fn tag_key_allows_escaping_space() {
- assert_fully_parsed!(tag_key(r#"wea\ ther"#), r#"wea ther"#);
+ assert_fully_parsed!(tag_key(r"wea\ ther"), r#"wea ther"#);
}
#[test]
fn tag_key_allows_escaping_backslash() {
- assert_fully_parsed!(tag_key(r#"\\wea\\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(tag_key(r"\\wea\\ther"), r"\wea\ther");
}
#[test]
fn tag_key_allows_backslash_with_unknown_escape() {
- assert_fully_parsed!(tag_key(r#"\wea\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(tag_key(r"\wea\ther"), r"\wea\ther");
}
#[test]
fn tag_key_allows_literal_newline_as_unknown_escape() {
assert_fully_parsed!(
tag_key(
- r#"weat\
-her"#
+ r"weat\
+her"
),
"weat\\\nher",
);
@@ -1998,7 +1998,7 @@ her"#,
#[test]
fn tag_key_disallows_ending_in_backslash() {
- let parsed = tag_key(r#"weather\"#);
+ let parsed = tag_key(r"weather\");
assert!(matches!(
parsed,
Err(nom::Err::Failure(super::Error::EndsWithBackslash))
@@ -2007,35 +2007,35 @@ her"#,
#[test]
fn tag_value_allows_escaping_comma() {
- assert_fully_parsed!(tag_value(r#"wea\,ther"#), r#"wea,ther"#);
+ assert_fully_parsed!(tag_value(r"wea\,ther"), r#"wea,ther"#);
}
#[test]
fn tag_value_allows_escaping_equal() {
- assert_fully_parsed!(tag_value(r#"wea\=ther"#), r#"wea=ther"#);
+ assert_fully_parsed!(tag_value(r"wea\=ther"), r#"wea=ther"#);
}
#[test]
fn tag_value_allows_escaping_space() {
- assert_fully_parsed!(tag_value(r#"wea\ ther"#), r#"wea ther"#);
+ assert_fully_parsed!(tag_value(r"wea\ ther"), r#"wea ther"#);
}
#[test]
fn tag_value_allows_escaping_backslash() {
- assert_fully_parsed!(tag_value(r#"\\wea\\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(tag_value(r"\\wea\\ther"), r"\wea\ther");
}
#[test]
fn tag_value_allows_backslash_with_unknown_escape() {
- assert_fully_parsed!(tag_value(r#"\wea\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(tag_value(r"\wea\ther"), r"\wea\ther");
}
#[test]
fn tag_value_allows_literal_newline_as_unknown_escape() {
assert_fully_parsed!(
tag_value(
- r#"weat\
-her"#
+ r"weat\
+her"
),
"weat\\\nher",
);
@@ -2054,7 +2054,7 @@ her"#,
#[test]
fn tag_value_disallows_ending_in_backslash() {
- let parsed = tag_value(r#"weather\"#);
+ let parsed = tag_value(r"weather\");
assert!(matches!(
parsed,
Err(nom::Err::Failure(super::Error::EndsWithBackslash))
@@ -2063,35 +2063,35 @@ her"#,
#[test]
fn field_key_allows_escaping_comma() {
- assert_fully_parsed!(field_key(r#"wea\,ther"#), r#"wea,ther"#);
+ assert_fully_parsed!(field_key(r"wea\,ther"), r#"wea,ther"#);
}
#[test]
fn field_key_allows_escaping_equal() {
- assert_fully_parsed!(field_key(r#"wea\=ther"#), r#"wea=ther"#);
+ assert_fully_parsed!(field_key(r"wea\=ther"), r#"wea=ther"#);
}
#[test]
fn field_key_allows_escaping_space() {
- assert_fully_parsed!(field_key(r#"wea\ ther"#), r#"wea ther"#);
+ assert_fully_parsed!(field_key(r"wea\ ther"), r#"wea ther"#);
}
#[test]
fn field_key_allows_escaping_backslash() {
- assert_fully_parsed!(field_key(r#"\\wea\\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(field_key(r"\\wea\\ther"), r"\wea\ther");
}
#[test]
fn field_key_allows_backslash_with_unknown_escape() {
- assert_fully_parsed!(field_key(r#"\wea\ther"#), r#"\wea\ther"#);
+ assert_fully_parsed!(field_key(r"\wea\ther"), r"\wea\ther");
}
#[test]
fn field_key_allows_literal_newline_as_unknown_escape() {
assert_fully_parsed!(
field_key(
- r#"weat\
-her"#
+ r"weat\
+her"
),
"weat\\\nher",
);
@@ -2110,7 +2110,7 @@ her"#,
#[test]
fn field_key_disallows_ending_in_backslash() {
- let parsed = field_key(r#"weather\"#);
+ let parsed = field_key(r"weather\");
assert!(matches!(
parsed,
Err(nom::Err::Failure(super::Error::EndsWithBackslash))
diff --git a/influxdb_tsm/src/encoders/integer.rs b/influxdb_tsm/src/encoders/integer.rs
index 7181207f58..ebf5065fbc 100644
--- a/influxdb_tsm/src/encoders/integer.rs
+++ b/influxdb_tsm/src/encoders/integer.rs
@@ -216,8 +216,8 @@ mod tests {
#[test]
fn zig_zag_encoding() {
- let input = vec![-2147483648, -2, -1, 0, 1, 2147483647];
- let exp = vec![4294967295, 3, 1, 0, 2, 4294967294];
+ let input = [-2147483648, -2, -1, 0, 1, 2147483647];
+ let exp = [4294967295, 3, 1, 0, 2, 4294967294];
for (i, v) in input.iter().enumerate() {
let encoded = zig_zag_encode(*v);
assert_eq!(encoded, exp[i]);
diff --git a/influxdb_tsm/src/key.rs b/influxdb_tsm/src/key.rs
index 59dfdaaaf8..5decc90d97 100644
--- a/influxdb_tsm/src/key.rs
+++ b/influxdb_tsm/src/key.rs
@@ -582,8 +582,8 @@ mod tests {
do_test_parse_tsm_field_key_value_good("foo#!~#", "foo");
// escaped values
- do_test_parse_tsm_field_key_value_good(r#"foo\ bar#!~#foo bar"#, "foo bar");
- do_test_parse_tsm_field_key_value_good(r#"foo\,bar#!~#foo,bar"#, "foo,bar");
+ do_test_parse_tsm_field_key_value_good(r"foo\ bar#!~#foo bar", "foo bar");
+ do_test_parse_tsm_field_key_value_good(r"foo\,bar#!~#foo,bar", "foo,bar");
// unescaped values
do_test_parse_tsm_field_key_value_bad("foo bar#!~#foo bar", "invalid unescaped ' '");
@@ -595,17 +595,17 @@ mod tests {
// partial delimiters
do_test_parse_tsm_field_key_value_good("foo#!#!~#foo", "foo#!");
do_test_parse_tsm_field_key_value_good("fo#!o#!~#foo", "fo#!o");
- do_test_parse_tsm_field_key_value_good(r#"fo#!\ o#!~#foo"#, "fo#! o");
- do_test_parse_tsm_field_key_value_good(r#"fo#!\,o#!~#foo"#, "fo#!,o");
- do_test_parse_tsm_field_key_value_good(r#"fo#!\=o#!~#foo"#, "fo#!=o");
+ do_test_parse_tsm_field_key_value_good(r"fo#!\ o#!~#foo", "fo#! o");
+ do_test_parse_tsm_field_key_value_good(r"fo#!\,o#!~#foo", "fo#!,o");
+ do_test_parse_tsm_field_key_value_good(r"fo#!\=o#!~#foo", "fo#!=o");
do_test_parse_tsm_field_key_value_good("foo#!~o#!~#foo", "foo#!~o");
do_test_parse_tsm_field_key_value_good("fo#!~o#!~#foo", "fo#!~o");
- do_test_parse_tsm_field_key_value_good(r#"fo#!~\ #!~#foo"#, "fo#!~ ");
+ do_test_parse_tsm_field_key_value_good(r"fo#!~\ #!~#foo", "fo#!~ ");
do_test_parse_tsm_field_key_value_good("foo#!~#!~#foo", "foo"); // matches!
do_test_parse_tsm_field_key_value_good("fo#!~o#!~#foo", "fo#!~o");
- do_test_parse_tsm_field_key_value_good(r#"fo#!~\ #!~#foo"#, "fo#!~ ");
+ do_test_parse_tsm_field_key_value_good(r"fo#!~\ #!~#foo", "fo#!~ ");
// test partial delimiters
do_test_parse_tsm_field_key_value_bad(
@@ -630,9 +630,9 @@ mod tests {
do_test_parse_tsm_field_key_value_bad("foo,bar#!~#foo,bar", "invalid unescaped ','");
do_test_parse_tsm_field_key_value_bad("foo=bar#!~#foo=bar", "invalid unescaped '='");
// but escaped before the delimiter is fine
- do_test_parse_tsm_field_key_value_good(r#"foo\ bar#!~#foo bar"#, "foo bar");
- do_test_parse_tsm_field_key_value_good(r#"foo\,bar#!~#foo,bar"#, "foo,bar");
- do_test_parse_tsm_field_key_value_good(r#"foo\=bar#!~#foo=bar"#, "foo=bar");
+ do_test_parse_tsm_field_key_value_good(r"foo\ bar#!~#foo bar", "foo bar");
+ do_test_parse_tsm_field_key_value_good(r"foo\,bar#!~#foo,bar", "foo,bar");
+ do_test_parse_tsm_field_key_value_good(r"foo\=bar#!~#foo=bar", "foo=bar");
}
#[test]
diff --git a/influxdb_tsm/src/mapper.rs b/influxdb_tsm/src/mapper.rs
index c6a69f8733..82fc21c551 100644
--- a/influxdb_tsm/src/mapper.rs
+++ b/influxdb_tsm/src/mapper.rs
@@ -251,7 +251,7 @@ impl MeasurementTable {
.entry(other_tagset.clone())
.or_default();
- for (other_field_key, other_blocks) in other_field_key_blocks.iter_mut() {
+ for (other_field_key, other_blocks) in &mut *other_field_key_blocks {
match field_key_blocks.get_mut(other_field_key) {
Some(blocks) => {
assert!(
@@ -583,7 +583,7 @@ fn refill_block_buffer(
) -> Result<(), TsmError> {
// Determine for each input block if the destination container needs
// refilling.
- for (field, blocks) in field_blocks.iter_mut() {
+ for (field, blocks) in &mut *field_blocks {
if blocks.is_empty() {
continue; // drained all blocks for this field
}
diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine.rs b/ingester/src/buffer_tree/partition/buffer/state_machine.rs
index f5c855587e..8ae931578c 100644
--- a/ingester/src/buffer_tree/partition/buffer/state_machine.rs
+++ b/ingester/src/buffer_tree/partition/buffer/state_machine.rs
@@ -245,7 +245,7 @@ mod tests {
assert_eq!(w2_data, final_data);
let same_arcs = w2_data
.into_iter()
- .zip(final_data.into_iter())
+ .zip(final_data)
.all(|(a, b)| Arc::ptr_eq(a.column(0), b.column(0)));
assert!(same_arcs);
@@ -351,7 +351,7 @@ mod tests {
assert_eq!(w2_data, final_data);
let same_arcs = w2_data
.into_iter()
- .zip(final_data.into_iter())
+ .zip(final_data)
.all(|(a, b)| Arc::ptr_eq(a.column(0), b.column(0)));
assert!(same_arcs);
diff --git a/ingester/src/test_util.rs b/ingester/src/test_util.rs
index 6ab96fd123..e154cc0bb7 100644
--- a/ingester/src/test_util.rs
+++ b/ingester/src/test_util.rs
@@ -434,7 +434,7 @@ pub(crate) fn assert_write_ops_eq(a: WriteOperation, b: WriteOperation) {
let a = a.into_tables().collect::<BTreeMap<_, _>>();
let b = b.into_tables().collect::<BTreeMap<_, _>>();
- a.into_iter().zip(b.into_iter()).for_each(|(a, b)| {
+ a.into_iter().zip(b).for_each(|(a, b)| {
assert_eq!(a.0, b.0, "table IDs differ - a table is missing!");
assert_eq!(
a.1.partitioned_data()
diff --git a/ingester/src/wal/reference_tracker/actor.rs b/ingester/src/wal/reference_tracker/actor.rs
index b7514fdfd7..f5d3b97051 100644
--- a/ingester/src/wal/reference_tracker/actor.rs
+++ b/ingester/src/wal/reference_tracker/actor.rs
@@ -276,7 +276,7 @@ where
// And then walk the WAL file sets.
let mut remove_ids = Vec::with_capacity(0);
- for (id, file_set) in self.wal_files.iter_mut() {
+ for (id, file_set) in &mut self.wal_files {
// Invariant: files in the file set always have at least 1 reference
assert!(!file_set.is_empty());
diff --git a/iox_catalog/src/migrate.rs b/iox_catalog/src/migrate.rs
index e455470533..ac38fb6dbd 100644
--- a/iox_catalog/src/migrate.rs
+++ b/iox_catalog/src/migrate.rs
@@ -192,7 +192,7 @@ impl Checksum {
impl std::fmt::Debug for Checksum {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- for b in self.0.iter() {
+ for b in &*self.0 {
write!(f, "{:02x}", b)?;
}
Ok(())
diff --git a/iox_query/src/exec/gapfill/algo.rs b/iox_query/src/exec/gapfill/algo.rs
index aab99fad6e..7a4a46b180 100644
--- a/iox_query/src/exec/gapfill/algo.rs
+++ b/iox_query/src/exec/gapfill/algo.rs
@@ -237,7 +237,7 @@ impl GapFiller {
let mut final_cursor = cursor;
// build the other group columns
- for (idx, ga) in group_arr.iter() {
+ for (idx, ga) in group_arr {
let mut cursor = self.cursor.clone_for_aggr_col(None)?;
let take_vec =
cursor.build_group_take_vec(&self.params, series_ends, input_time_array)?;
@@ -253,7 +253,7 @@ impl GapFiller {
}
// Build the aggregate columns
- for (idx, aa) in aggr_arr.iter() {
+ for (idx, aa) in aggr_arr {
let mut cursor = self.cursor.clone_for_aggr_col(Some(*idx))?;
let output_array =
cursor.build_aggr_col(&self.params, series_ends, input_time_array, aa)?;
@@ -420,7 +420,7 @@ impl Cursor {
/// Update this cursor to reflect that `offset` older rows are being sliced off from the
/// buffered input.
fn slice(&mut self, offset: usize, batch: &RecordBatch) -> Result<()> {
- for (idx, aggr_col_state) in self.aggr_col_states.iter_mut() {
+ for (idx, aggr_col_state) in &mut self.aggr_col_states {
aggr_col_state.slice(offset, batch.column(*idx))?;
}
self.next_input_offset -= offset;
@@ -716,7 +716,7 @@ impl Cursor {
series_ends: &[usize],
vec_builder: &mut impl VecBuilder,
) -> Result<()> {
- for series in series_ends.iter() {
+ for series in series_ends {
if self
.next_ts
.map_or(false, |next_ts| next_ts > params.last_ts)
diff --git a/iox_query/src/exec/gapfill/exec_tests.rs b/iox_query/src/exec/gapfill/exec_tests.rs
index 627130b52d..8bea7af8ef 100644
--- a/iox_query/src/exec/gapfill/exec_tests.rs
+++ b/iox_query/src/exec/gapfill/exec_tests.rs
@@ -1210,7 +1210,7 @@ fn phys_fill_strategies(
let start = records.group_cols.len() + 1; // 1 is for time col
let end = start + records.agg_cols.len();
let mut v = Vec::with_capacity(records.agg_cols.len());
- for f in records.schema().fields()[start..end].iter() {
+ for f in &records.schema().fields()[start..end] {
v.push((
phys_col(f.name(), &records.schema())?,
fill_strategy.clone(),
diff --git a/iox_query/src/exec/seriesset/series.rs b/iox_query/src/exec/seriesset/series.rs
index cc0e5042d9..4f12c5dd9b 100644
--- a/iox_query/src/exec/seriesset/series.rs
+++ b/iox_query/src/exec/seriesset/series.rs
@@ -289,16 +289,13 @@ impl SeriesSet {
.tags
.iter()
.cloned()
- .chain(
- [
- (Arc::from(FIELD_COLUMN_NAME), Arc::from(field_name)),
- (
- Arc::from(MEASUREMENT_COLUMN_NAME),
- Arc::clone(&self.table_name),
- ),
- ]
- .into_iter(),
- )
+ .chain([
+ (Arc::from(FIELD_COLUMN_NAME), Arc::from(field_name)),
+ (
+ Arc::from(MEASUREMENT_COLUMN_NAME),
+ Arc::clone(&self.table_name),
+ ),
+ ])
.collect::<Vec<_>>();
// sort by name
@@ -315,7 +312,7 @@ impl SeriesSet {
fn build_batches<T>(timestamps: Vec<Vec<i64>>, values: Vec<Vec<T>>) -> Vec<Batch<T>> {
timestamps
.into_iter()
- .zip(values.into_iter())
+ .zip(values)
.map(|(timestamps, values)| Batch { timestamps, values })
.collect()
}
diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs
index e678801e9a..258dc77d39 100644
--- a/iox_query/src/lib.rs
+++ b/iox_query/src/lib.rs
@@ -324,9 +324,9 @@ pub fn chunks_have_distinct_counts<'a>(
// do not need to compute potential duplicates. We will treat
// as all of them have duplicates
chunks.into_iter().all(|chunk| {
- let Some(col_stats) = &chunk
- .stats()
- .column_statistics else {return false};
+ let Some(col_stats) = &chunk.stats().column_statistics else {
+ return false;
+ };
col_stats.iter().all(|col| col.distinct_count.is_some())
})
}
diff --git a/iox_query/src/logical_optimizer/handle_gapfill.rs b/iox_query/src/logical_optimizer/handle_gapfill.rs
index d4290f7ea7..179f9c7825 100644
--- a/iox_query/src/logical_optimizer/handle_gapfill.rs
+++ b/iox_query/src/logical_optimizer/handle_gapfill.rs
@@ -199,7 +199,10 @@ fn build_gapfill_node(
col(new_aggr_plan.schema().fields()[date_bin_gapfill_index].qualified_column());
let LogicalPlan::Aggregate(aggr) = &new_aggr_plan else {
- return Err(DataFusionError::Internal(format!("Expected Aggregate plan, got {}", new_aggr_plan.display())));
+ return Err(DataFusionError::Internal(format!(
+ "Expected Aggregate plan, got {}",
+ new_aggr_plan.display()
+ )));
};
let mut new_group_expr: Vec<_> = aggr
.schema
@@ -411,7 +414,7 @@ fn handle_projection(proj: &Projection) -> Result<Option<LogicalPlan>> {
}) else {
// If this is not a projection that is a parent to a GapFill node,
// then there is nothing to do.
- return Ok(None)
+ return Ok(None);
};
let mut fill_fn_rewriter = FillFnRewriter {
diff --git a/iox_query/src/physical_optimizer/chunk_extraction.rs b/iox_query/src/physical_optimizer/chunk_extraction.rs
index 9f5f341e33..a462b2973e 100644
--- a/iox_query/src/physical_optimizer/chunk_extraction.rs
+++ b/iox_query/src/physical_optimizer/chunk_extraction.rs
@@ -78,7 +78,7 @@ impl ExtractChunksVisitor {
fn add_sort_key(&mut self, sort_key: Option<&SortKey>) -> Result<(), DataFusionError> {
let Some(sort_key) = sort_key else {
- return Ok(())
+ return Ok(());
};
if let Some(existing) = &self.sort_key {
diff --git a/iox_query/src/physical_optimizer/combine_chunks.rs b/iox_query/src/physical_optimizer/combine_chunks.rs
index 939a86084e..1b103da6d2 100644
--- a/iox_query/src/physical_optimizer/combine_chunks.rs
+++ b/iox_query/src/physical_optimizer/combine_chunks.rs
@@ -58,7 +58,7 @@ impl PhysicalOptimizerRule for CombineChunks {
let Some(union_of_chunks) = union_of_chunks.as_any().downcast_ref::<UnionExec>() else {
return Err(DataFusionError::External(format!("Expected chunks_to_physical_nodes to produce UnionExec but got {union_of_chunks:?}").into()));
};
- let final_union = UnionExec::new(union_of_chunks.inputs().iter().cloned().chain(inputs_other.into_iter()).collect());
+ let final_union = UnionExec::new(union_of_chunks.inputs().iter().cloned().chain(inputs_other).collect());
return Ok(Transformed::Yes(Arc::new(final_union)));
}
}
diff --git a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs
index de1a5cdffd..9f6539ea4c 100644
--- a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs
+++ b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs
@@ -40,7 +40,8 @@ impl PhysicalOptimizerRule for DedupNullColumns {
let mut children = dedup_exec.children();
assert_eq!(children.len(), 1);
let child = children.remove(0);
- let Some((schema, chunks, _output_sort_key)) = extract_chunks(child.as_ref()) else {
+ let Some((schema, chunks, _output_sort_key)) = extract_chunks(child.as_ref())
+ else {
return Ok(Transformed::No(plan));
};
diff --git a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
index eeb6367514..92cf1209f0 100644
--- a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
+++ b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs
@@ -57,8 +57,9 @@ impl PhysicalOptimizerRule for DedupSortOrder {
let mut children = dedup_exec.children();
assert_eq!(children.len(), 1);
let child = children.remove(0);
- let Some((schema, chunks, _output_sort_key)) = extract_chunks(child.as_ref()) else {
- return Ok(Transformed::No(plan))
+ let Some((schema, chunks, _output_sort_key)) = extract_chunks(child.as_ref())
+ else {
+ return Ok(Transformed::No(plan));
};
let mut chunk_sort_keys: Vec<IndexSet<_>> = chunks
diff --git a/iox_query/src/physical_optimizer/sort/parquet_sortness.rs b/iox_query/src/physical_optimizer/sort/parquet_sortness.rs
index 3aa6f31079..ce7f4b9cf8 100644
--- a/iox_query/src/physical_optimizer/sort/parquet_sortness.rs
+++ b/iox_query/src/physical_optimizer/sort/parquet_sortness.rs
@@ -32,19 +32,21 @@ impl PhysicalOptimizerRule for ParquetSortness {
config: &ConfigOptions,
) -> Result<Arc<dyn ExecutionPlan>> {
plan.transform_down(&|plan| {
- let Some(children_with_sort) = detect_children_with_desired_ordering(plan.as_ref()) else {
+ let Some(children_with_sort) = detect_children_with_desired_ordering(plan.as_ref())
+ else {
return Ok(Transformed::No(plan));
};
let mut children_new = Vec::with_capacity(children_with_sort.len());
for (child, desired_ordering) in children_with_sort {
- let mut rewriter = ParquetSortnessRewriter{config, desired_ordering: &desired_ordering};
+ let mut rewriter = ParquetSortnessRewriter {
+ config,
+ desired_ordering: &desired_ordering,
+ };
let child = Arc::clone(&child).rewrite(&mut rewriter)?;
children_new.push(child);
}
- Ok(Transformed::Yes(
- plan.with_new_children(children_new)?
- ))
+ Ok(Transformed::Yes(plan.with_new_children(children_new)?))
})
}
diff --git a/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs b/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs
index 42b3ebd631..2ba3f20440 100644
--- a/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs
+++ b/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs
@@ -118,7 +118,7 @@ fn sort_should_be_pushed_down(sort_exec: &SortExec) -> bool {
}
let Some(union_exec) = input.as_any().downcast_ref::<UnionExec>() else {
- return false
+ return false;
};
let required_ordering = sort_exec.output_ordering().map(sort_exprs_to_requirement);
diff --git a/iox_query/src/plan/fieldlist.rs b/iox_query/src/plan/fieldlist.rs
index 34a21d003b..e2e19f728a 100644
--- a/iox_query/src/plan/fieldlist.rs
+++ b/iox_query/src/plan/fieldlist.rs
@@ -45,8 +45,8 @@ impl FieldListPlan {
/// Append the other plan to ourselves
pub fn append_other(mut self, other: Self) -> Self {
- self.extra_plans.extend(other.extra_plans.into_iter());
- self.known_values.extend(other.known_values.into_iter());
+ self.extra_plans.extend(other.extra_plans);
+ self.known_values.extend(other.known_values);
self
}
diff --git a/iox_query/src/plan/stringset.rs b/iox_query/src/plan/stringset.rs
index 45f3395251..8e49d2cd6f 100644
--- a/iox_query/src/plan/stringset.rs
+++ b/iox_query/src/plan/stringset.rs
@@ -104,7 +104,7 @@ impl StringSetPlanBuilder {
self.strings.append(&mut ss);
}
Err(ssref) => {
- for s in ssref.iter() {
+ for s in &*ssref {
if !self.strings.contains(s) {
self.strings.insert(s.clone());
}
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index e20196bf24..54d0e8d140 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -253,7 +253,9 @@ impl TableProvider for ChunkTableProvider {
split_conjunction(&expr)
.into_iter()
.filter(|expr| {
- let Ok(expr_cols) = expr.to_columns() else {return false};
+ let Ok(expr_cols) = expr.to_columns() else {
+ return false;
+ };
expr_cols
.into_iter()
.all(|c| dedup_cols.contains(c.name.as_str()))
diff --git a/iox_query/src/provider/adapter.rs b/iox_query/src/provider/adapter.rs
index 9f852fd122..c960ad4757 100644
--- a/iox_query/src/provider/adapter.rs
+++ b/iox_query/src/provider/adapter.rs
@@ -153,7 +153,7 @@ impl SchemaAdapterStream {
.collect::<Vec<_>>();
// sanity logic checks
- for input_field in input_schema.fields().iter() {
+ for input_field in input_schema.fields() {
// that there are no fields in the input schema that are
// not present in the desired output schema (otherwise we
// are dropping fields -- theys should have been selected
diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs
index 05289e7522..a94fbdee40 100644
--- a/iox_query/src/provider/physical.rs
+++ b/iox_query/src/provider/physical.rs
@@ -199,6 +199,7 @@ pub fn chunks_to_physical_nodes(
// ensure that chunks are actually ordered by chunk order
chunks.sort_by_key(|(_meta, c)| c.order());
+ #[allow(clippy::manual_try_fold)]
let num_rows = chunks.iter().map(|(_meta, c)| c.stats().num_rows).fold(
Some(0usize),
|accu, x| match (accu, x) {
diff --git a/iox_query/src/statistics.rs b/iox_query/src/statistics.rs
index 15a82ac55a..fd5f98cfaa 100644
--- a/iox_query/src/statistics.rs
+++ b/iox_query/src/statistics.rs
@@ -97,8 +97,9 @@ impl<'a> DFStatsAggregator<'a> {
let mut used_cols = vec![false; self.col_idx_map.len()];
for (update_field, update_col) in update_schema.fields().iter().zip(update_cols) {
- let Some(idx) = self.col_idx_map
- .get(update_field.name().as_str()) else {continue;};
+ let Some(idx) = self.col_idx_map.get(update_field.name().as_str()) else {
+ continue;
+ };
let base_col = &mut base_cols[*idx];
used_cols[*idx] = true;
@@ -209,7 +210,9 @@ impl TriStateScalar {
(this @ Self::Valid(_), Some(update)) => {
let mut base = Self::Invalid;
std::mem::swap(this, &mut base);
- let Self::Valid(base) = base else {unreachable!()};
+ let Self::Valid(base) = base else {
+ unreachable!()
+ };
*this = match f(base, update) {
Some(val) => Self::Valid(val),
None => Self::Invalid,
diff --git a/iox_query_influxql/src/plan/expr_type_evaluator.rs b/iox_query_influxql/src/plan/expr_type_evaluator.rs
index e10d2f8eb4..7375f2923f 100644
--- a/iox_query_influxql/src/plan/expr_type_evaluator.rs
+++ b/iox_query_influxql/src/plan/expr_type_evaluator.rs
@@ -124,7 +124,7 @@ impl<'a> TypeEvaluator<'a> {
}
_ => {
let mut data_type: Option<VarRefDataType> = None;
- for tr in self.from.iter() {
+ for tr in self.from {
match tr {
DataSource::Table(name) => match (
data_type,
@@ -324,9 +324,8 @@ impl<'a> TypeEvaluator<'a> {
// These functions require two numeric arguments and return a float
name @ ("atan2" | "pow") => {
- let (Some(arg0), Some(arg1)) = (arg_types
- .get(0), arg_types.get(1)) else {
- return error::query(format!("{name} expects 2 arguments"))
+ let (Some(arg0), Some(arg1)) = (arg_types.get(0), arg_types.get(1)) else {
+ return error::query(format!("{name} expects 2 arguments"));
};
match (arg0, arg1) {
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index 61fd6ddd8b..313eb13ee5 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -661,7 +661,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let ctx = ctx.subquery(select);
let Some(plan) = self.union_from(&ctx, select)? else {
- return Ok(None)
+ return Ok(None);
};
let group_by_tags = ctx.group_by_tags();
@@ -823,14 +823,16 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
// In addition, the time column is projected as the Unix epoch.
let Some(time_column_index) = find_time_column_index(fields) else {
- return error::internal("unable to find time column")
- };
+ return error::internal("unable to find time column");
+ };
// Take ownership of the alias, so we don't reallocate, and temporarily place a literal
// `NULL` in its place.
- let Expr::Alias(Alias{name: alias, ..}) = std::mem::replace(&mut select_exprs[time_column_index], lit(ScalarValue::Null)) else {
- return error::internal("time column is not an alias")
- };
+ let Expr::Alias(Alias { name: alias, .. }) =
+ std::mem::replace(&mut select_exprs[time_column_index], lit(ScalarValue::Null))
+ else {
+ return error::internal("time column is not an alias");
+ };
select_exprs[time_column_index] = lit_timestamp_nano(0).alias(alias);
@@ -1138,7 +1140,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
// gather some time-related metadata
let Some(time_column_index) = find_time_column_index(fields) else {
- return error::internal("unable to find time column")
+ return error::internal("unable to find time column");
};
// if there's only a single selector, wrap non-aggregated fields into that selector
@@ -1211,8 +1213,10 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let time_column = {
// Take ownership of the alias, so we don't reallocate, and temporarily place a literal
// `NULL` in its place.
- let Expr::Alias(Alias{name: alias, ..}) = std::mem::replace(&mut select_exprs[time_column_index], lit(ScalarValue::Null)) else {
- return error::internal("time column is not an alias")
+ let Expr::Alias(Alias { name: alias, .. }) =
+ std::mem::replace(&mut select_exprs[time_column_index], lit(ScalarValue::Null))
+ else {
+ return error::internal("time column is not an alias");
};
// Rewrite the `time` column projection based on a series of rules in the following
@@ -1448,7 +1452,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
.map_err(|err| error::map::internal(format!("display_name: {err}")))?;
let Expr::ScalarUDF(expr::ScalarUDF { fun, args }) = e else {
- return error::internal(format!("udf_to_expr: unexpected expression: {e}"))
+ return error::internal(format!("udf_to_expr: unexpected expression: {e}"));
};
fn derivative_unit(ctx: &Context<'_>, args: &Vec<Expr>) -> Result<ScalarValue> {
@@ -2272,7 +2276,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
Some(from) => {
let all_tables = self.s.table_names().into_iter().collect::<HashSet<_>>();
let mut out = HashSet::new();
- for qualified_name in from.iter() {
+ for qualified_name in &*from {
if qualified_name.database.is_some() {
return error::not_implemented("database name in from clause");
}
@@ -2388,8 +2392,12 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let mut union_plan = None;
for table in tables {
- let Some(table_schema) = self.s.table_schema(&table) else {continue};
- let Some((plan, measurement_expr)) = self.create_table_ref(&table)? else {continue;};
+ let Some(table_schema) = self.s.table_schema(&table) else {
+ continue;
+ };
+ let Some((plan, measurement_expr)) = self.create_table_ref(&table)? else {
+ continue;
+ };
let ds = DataSource::Table(table.clone());
let schema = IQLSchema::new_from_ds_schema(plan.schema(), ds.schema(self.s)?)?;
@@ -2474,7 +2482,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let mut measurement_names_builder = StringDictionaryBuilder::<Int32Type>::new();
let mut tag_key_builder = StringDictionaryBuilder::<Int32Type>::new();
for table in tables {
- let Some(table_schema) = self.s.table_schema(&table) else {continue};
+ let Some(table_schema) = self.s.table_schema(&table) else {
+ continue;
+ };
for (t, f) in table_schema.iter() {
match t {
InfluxColumnType::Tag => {}
@@ -2547,7 +2557,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let mut field_key_builder = StringBuilder::new();
let mut field_type_builder = StringBuilder::new();
for table in tables {
- let Some(table_schema) = self.s.table_schema(&table) else {continue};
+ let Some(table_schema) = self.s.table_schema(&table) else {
+ continue;
+ };
for (t, f) in table_schema.iter() {
let t = match t {
InfluxColumnType::Field(t) => t,
@@ -2625,7 +2637,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let mut union_plan = None;
for table in tables {
- let Some(schema) = self.s.table_schema(&table) else {continue;};
+ let Some(schema) = self.s.table_schema(&table) else {
+ continue;
+ };
let keys = eval_with_key_clause(
schema.tags_iter().map(|field| field.name().as_str()),
@@ -2636,7 +2650,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
continue;
}
- let Some((plan, measurement_expr)) = self.create_table_ref(&table)? else {continue;};
+ let Some((plan, measurement_expr)) = self.create_table_ref(&table)? else {
+ continue;
+ };
let ds = DataSource::Table(table.clone());
let schema = IQLSchema::new_from_ds_schema(plan.schema(), ds.schema(self.s)?)?;
@@ -2736,7 +2752,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let mut union_plan = None;
for table in tables {
- let Some((plan, _measurement_expr)) = self.create_table_ref(&table)? else {continue;};
+ let Some((plan, _measurement_expr)) = self.create_table_ref(&table)? else {
+ continue;
+ };
let ds = DataSource::Table(table.clone());
let schema = IQLSchema::new_from_ds_schema(plan.schema(), ds.schema(self.s)?)?;
@@ -2989,7 +3007,10 @@ fn build_gap_fill_node(
};
let LogicalPlan::Aggregate(aggr) = &input else {
- return Err(DataFusionError::Internal(format!("Expected Aggregate plan, got {}", input.display())));
+ return Err(DataFusionError::Internal(format!(
+ "Expected Aggregate plan, got {}",
+ input.display()
+ )));
};
let mut new_group_expr: Vec<_> = aggr
.schema
diff --git a/iox_query_influxql/src/plan/planner/select.rs b/iox_query_influxql/src/plan/planner/select.rs
index e0125f3f50..24fb3726c0 100644
--- a/iox_query_influxql/src/plan/planner/select.rs
+++ b/iox_query_influxql/src/plan/planner/select.rs
@@ -266,7 +266,7 @@ impl<'a> Selector<'a> {
}
fn bottom(call: &'a Call) -> Result<Self> {
- let [field_key, tag_keys @ .., narg] = call.args.as_slice() else {
+ let [field_key, tag_keys @ .., narg] = call.args.as_slice() else {
return error::internal(format!(
"invalid number of arguments for bottom: expected 2 or more, got {}",
call.args.len()
@@ -355,7 +355,7 @@ impl<'a> Selector<'a> {
}
fn top(call: &'a Call) -> Result<Self> {
- let [field_key, tag_keys @ .., narg] = call.args.as_slice() else {
+ let [field_key, tag_keys @ .., narg] = call.args.as_slice() else {
return error::internal(format!(
"invalid number of arguments for top: expected 2 or more, got {}",
call.args.len()
diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs
index 329fc9a129..5a0ad563a1 100644
--- a/iox_query_influxql/src/plan/rewriter.rs
+++ b/iox_query_influxql/src/plan/rewriter.rs
@@ -327,7 +327,9 @@ impl RewriteSelect {
// resolve possible tag references in group_by
if let Some(group_by) = group_by.as_mut() {
for dim in group_by.iter_mut() {
- let Dimension::VarRef(var_ref) = dim else { continue };
+ let Dimension::VarRef(var_ref) = dim else {
+ continue;
+ };
if from_tag_set.contains(var_ref.name.as_str()) {
var_ref.data_type = Some(VarRefDataType::Tag);
}
@@ -344,7 +346,7 @@ impl RewriteSelect {
stmt: &SelectStatement,
) -> Result<Vec<DataSource>> {
let mut new_from = Vec::new();
- for ms in stmt.from.iter() {
+ for ms in &*stmt.from {
match ms {
MeasurementSelection::Name(qmn) => match qmn {
QualifiedMeasurementName {
@@ -381,7 +383,9 @@ impl RewriteSelect {
stmt: &SelectStatement,
from: &[DataSource],
) -> Result<Option<WhereClause>> {
- let Some(mut where_clause) = stmt.condition.clone() else { return Ok(None) };
+ let Some(mut where_clause) = stmt.condition.clone() else {
+ return Ok(None);
+ };
let tv = TypeEvaluator::new(s, from);
@@ -522,7 +526,7 @@ fn from_drop_empty(s: &dyn SchemaProvider, stmt: &mut Select) {
stmt.fields.iter().any(|f| {
walk_expr(&f.expr, &mut |e| {
- if matches!(e, Expr::VarRef(VarRef{ name, ..}) if matches!(field_by_name(&q.fields, name.as_str()), Some(_))) {
+ if matches!(e, Expr::VarRef(VarRef{ name, ..}) if field_by_name(&q.fields, name.as_str()).is_some()) {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
@@ -563,7 +567,9 @@ fn from_field_and_dimensions(
for tr in from {
match tr {
DataSource::Table(name) => {
- let Some((field_set, tag_set)) = field_and_dimensions(s, name.as_str()) else { continue };
+ let Some((field_set, tag_set)) = field_and_dimensions(s, name.as_str()) else {
+ continue;
+ };
// Merge field_set with existing
for (name, ft) in &field_set {
@@ -586,7 +592,9 @@ fn from_field_and_dimensions(
let Field {
name, data_type, ..
} = f;
- let Some(dt) = influx_type_to_var_ref_data_type(*data_type) else { continue };
+ let Some(dt) = influx_type_to_var_ref_data_type(*data_type) else {
+ continue;
+ };
match fs.get(name.as_str()) {
Some(existing_type) => {
@@ -810,7 +818,7 @@ fn fields_resolve_aliases_and_types(
names
.iter()
- .zip(fields.into_iter())
+ .zip(fields)
.map(|(name, field)| {
let expr = field.expr;
let data_type = tv.eval_type(&expr)?;
diff --git a/iox_query_influxrpc/src/lib.rs b/iox_query_influxrpc/src/lib.rs
index 75373fa632..261df17077 100644
--- a/iox_query_influxrpc/src/lib.rs
+++ b/iox_query_influxrpc/src/lib.rs
@@ -1294,8 +1294,8 @@ fn table_chunk_stream<'a>(
futures::stream::iter(table_predicates)
.filter_map(move |(table_name, predicate)| async move {
let Some(table_schema) = meta.table_schema(table_name) else {
- return None;
- };
+ return None;
+ };
let table_schema = Arc::new(table_schema);
Some((table_name, table_schema, predicate))
})
diff --git a/iox_query_influxrpc/src/scan_plan.rs b/iox_query_influxrpc/src/scan_plan.rs
index ca9cde7418..e20c0f5819 100644
--- a/iox_query_influxrpc/src/scan_plan.rs
+++ b/iox_query_influxrpc/src/scan_plan.rs
@@ -103,7 +103,7 @@ impl<'a> ScanPlanBuilder<'a> {
/// Adds `chunks` to the list of Chunks to scan
pub fn with_chunks(mut self, chunks: impl IntoIterator<Item = Arc<dyn QueryChunk>>) -> Self {
- self.chunks.extend(chunks.into_iter());
+ self.chunks.extend(chunks);
self
}
diff --git a/ioxd_common/src/http/utils.rs b/ioxd_common/src/http/utils.rs
index 1b65ee752f..eea9552bfa 100644
--- a/ioxd_common/src/http/utils.rs
+++ b/ioxd_common/src/http/utils.rs
@@ -210,6 +210,6 @@ mod tests {
.unwrap();
let got = parse_body(request, MAX_BYTES).await;
- assert!(matches!(got, Ok(_)));
+ assert!(got.is_ok());
}
}
diff --git a/logfmt/tests/logging.rs b/logfmt/tests/logging.rs
index 339bafd5bb..4cb8cee8a7 100644
--- a/logfmt/tests/logging.rs
+++ b/logfmt/tests/logging.rs
@@ -27,9 +27,9 @@ macro_rules! assert_logs {
assert_eq!(
normalized_expected, normalized_actual,
- "\n\nexpected:\n\n{:#?}\nactual:\n\n{:#?}\n\nnormalized_expected:\n\n{:#?}\nnormalized_actual:\n\n{:#?}\n\n",
- expected_lines, actual_lines,
- normalized_expected, normalized_actual
+ "\n\nexpected:\n\n{:#?}\nactual:\n\n{:#?}\
+ \n\nnormalized_expected:\n\n{:#?}\nnormalized_actual:\n\n{:#?}\n\n",
+ expected_lines, actual_lines, normalized_expected, normalized_actual
)
};
}
@@ -44,12 +44,17 @@ fn level() {
warn!("This is a warn message");
error!("This is a error message");
- let expected = vec![
- "level=info msg=\"This is an info message\" target=\"logging\" location=\"logfmt/tests/logging.rs:36\" time=1612181556329599000",
- "level=debug msg=\"This is a debug message\" target=\"logging\" location=\"logfmt/tests/logging.rs:37\" time=1612181556329618000",
- "level=trace msg=\"This is a trace message\" target=\"logging\" location=\"logfmt/tests/logging.rs:38\" time=1612181556329634000",
- "level=warn msg=\"This is a warn message\" target=\"logging\" location=\"logfmt/tests/logging.rs:39\" time=1612181556329646000",
- "level=error msg=\"This is a error message\" target=\"logging\" location=\"logfmt/tests/logging.rs:40\" time=1612181556329661000",
+ let expected = [
+ "level=info msg=\"This is an info message\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:36\" time=1612181556329599000",
+ "level=debug msg=\"This is a debug message\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:37\" time=1612181556329618000",
+ "level=trace msg=\"This is a trace message\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:38\" time=1612181556329634000",
+ "level=warn msg=\"This is a warn message\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:39\" time=1612181556329646000",
+ "level=error msg=\"This is a error message\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:40\" time=1612181556329661000",
];
assert_logs!(capture, expected);
@@ -64,8 +69,9 @@ fn event_fields_strings() {
"This is an info message"
);
- let expected = vec![
- "level=info msg=\"This is an info message\" event_name=\"foo bar\" other_event=baz target=\"logging\" location=\"logfmt/tests/logging.rs:59\" time=1612187170712973000",
+ let expected = [
+ "level=info msg=\"This is an info message\" event_name=\"foo bar\" other_event=baz \
+ target=\"logging\" location=\"logfmt/tests/logging.rs:59\" time=1612187170712973000",
];
assert_logs!(capture, expected);
@@ -76,9 +82,12 @@ fn event_fields_strings_quoting() {
let capture = CapturedWriter::new();
info!(foo = r#"body: Body(Full(b"{\"error\": \"Internal error\"}"))"#,);
- let expected = vec![
- r#"level=info foo="body: Body(Full(b\"{\\\"error\\\": \\\"Internal error\\\"}\"))" target="logging" location="logfmt/tests/logging.rs:59" time=1612187170712973000"#,
- ];
+ let escaped_foo_value = r#"body: Body(Full(b\"{\\\"error\\\": \\\"Internal error\\\"}\"))"#;
+
+ let expected = [&format!(
+ "level=info foo=\"{escaped_foo_value}\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:59\" time=1612187170712973000"
+ )];
assert_logs!(capture, expected);
}
@@ -107,8 +116,9 @@ fn event_fields_numeric() {
let capture = CapturedWriter::new();
info!(bar = 1, frr = false, "This is an info message");
- let expected = vec![
- "level=info msg=\"This is an info message\" bar=1 frr=false target=\"logging\" location=\"logfmt/tests/logging.rs:72\" time=1612187170712947000",
+ let expected = [
+ "level=info msg=\"This is an info message\" bar=1 frr=false target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:72\" time=1612187170712947000",
];
assert_logs!(capture, expected);
@@ -119,8 +129,9 @@ fn event_fields_repeated() {
let capture = CapturedWriter::new();
info!(bar = 1, bar = 2, "This is an info message");
- let expected = vec![
- "level=info msg=\"This is an info message\" bar=1 bar=2 target=\"logging\" location=\"logfmt/tests/logging.rs:84\" time=1612187170712948000",
+ let expected = [
+ "level=info msg=\"This is an info message\" bar=1 bar=2 target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:84\" time=1612187170712948000",
];
assert_logs!(capture, expected);
@@ -132,11 +143,15 @@ fn event_fields_errors() {
let err: Box<dyn Error + 'static> =
io::Error::new(io::ErrorKind::Other, "shaving yak failed!").into();
-
error!(the_error = err.as_ref(), "This is an error message");
- let expected = vec![
- "level=error msg=\"This is an error message\" the_error=\"\\\"Custom { kind: Other, error: \\\\\\\"shaving yak failed!\\\\\\\" }\\\"\" the_error.display=\"shaving yak failed!\" target=\"logging\" location=\"logfmt/tests/logging.rs:99\" time=1612187170712947000",
+
+ let expected = [
+ "level=error msg=\"This is an error message\" the_error=\"\\\"Custom { kind: Other, \
+ error: \\\\\\\"shaving yak failed!\\\\\\\" }\\\"\" \
+ the_error.display=\"shaving yak failed!\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:99\" time=1612187170712947000",
];
+
assert_logs!(capture, expected);
}
@@ -147,8 +162,10 @@ fn event_fields_structs() {
info!(s = ?my_struct, "This is an info message");
- let expected = vec![
- "level=info msg=\"This is an info message\" s=\"TestDebugStruct { b: true, s: \\\"The String\\\" }\" target=\"logging\" location=\"logfmt/tests/logging.rs:111\" time=1612187170712937000",
+ let expected = [
+ "level=info msg=\"This is an info message\" s=\"TestDebugStruct { b: true, \
+ s: \\\"The String\\\" }\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:111\" time=1612187170712937000",
];
assert_logs!(capture, expected);
@@ -163,9 +180,10 @@ fn event_spans() {
info!(shave = "mo yak!", "info message in span");
std::mem::drop(enter);
- let expected = vec![
+ let expected = [
"level=info span_name=\"my_span\" foo=bar span=1 time=1612209178717290000",
- "level=info msg=\"info message in span\" shave=\"mo yak!\" span=1 target=\"logging\" location=\"logfmt/tests/logging.rs:132\" time=1612209178717329000",
+ "level=info msg=\"info message in span\" shave=\"mo yak!\" span=1 target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:132\" time=1612209178717329000",
];
assert_logs!(capture, expected);
@@ -190,12 +208,14 @@ fn event_multi_span() {
info!(shave = "mo yak!", "info message in span 3");
}
- let expected = vec![
+ let expected = [
"level=info span_name=\"my_span\" foo=bar span=1 time=1612209327939714000",
"level=info span_name=\"my_second_span\" foo=baz span=2 time=1612209327939743000",
- "level=info msg=\"info message in span 2\" shave=yak! target=\"logging\" location=\"logfmt/tests/logging.rs:154\" time=1612209327939774000",
+ "level=info msg=\"info message in span 2\" shave=yak! target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:154\" time=1612209327939774000",
"level=info span_name=\"my_second_span\" foo=brmp span=3 time=1612209327939795000",
- "level=info msg=\"info message in span 3\" shave=\"mo yak!\" target=\"logging\" location=\"logfmt/tests/logging.rs:160\" time=1612209327939828000",
+ "level=info msg=\"info message in span 3\" shave=\"mo yak!\" target=\"logging\" \
+ location=\"logfmt/tests/logging.rs:160\" time=1612209327939828000",
];
assert_logs!(capture, expected);
@@ -240,7 +260,7 @@ fn normalize<'a>(lines: impl Iterator<Item = &'a String>) -> Vec<String> {
/// s/time=1612187170712947000/time=NORMALIZED/g
fn normalize_timestamp(v: &str) -> String {
- let re = Regex::new(r#"time=\d+"#).unwrap();
+ let re = Regex::new(r"time=\d+").unwrap();
re.replace_all(v, "time=NORMALIZED").to_string()
}
@@ -257,7 +277,7 @@ fn normalize_spans(lines: Vec<String>) -> Vec<String> {
//
// Note: we include leading and trailing spaces so that span=2
// doesn't also match span=21423
- let re = Regex::new(r#" span=(\d+) "#).unwrap();
+ let re = Regex::new(r" span=(\d+) ").unwrap();
// This collect isn't needless: the `fold` below moves `lines`, so this
// iterator can't borrow `lines`, we need to collect into a `Vec` to
diff --git a/metric/src/metric.rs b/metric/src/metric.rs
index 6fd1d5286a..cd0582c7b4 100644
--- a/metric/src/metric.rs
+++ b/metric/src/metric.rs
@@ -96,7 +96,7 @@ impl<T: MetricObserver> Instrument for Metric<T> {
reporter.start_metric(self.name, self.description, T::kind());
let values = self.shared.values.lock();
- for (attributes, metric_value) in values.iter() {
+ for (attributes, metric_value) in &*values {
reporter.report_observation(attributes, metric_value.observe())
}
diff --git a/mutable_batch/src/lib.rs b/mutable_batch/src/lib.rs
index 4875556c8d..681bb5a06b 100644
--- a/mutable_batch/src/lib.rs
+++ b/mutable_batch/src/lib.rs
@@ -94,7 +94,7 @@ impl MutableBatch {
let mut schema_builder = SchemaBuilder::new();
let schema = match selection {
Projection::All => {
- for (column_name, column_idx) in self.column_names.iter() {
+ for (column_name, column_idx) in &self.column_names {
let column = &self.columns[*column_idx];
schema_builder.influx_column(column_name, column.influx_type());
}
diff --git a/mutable_batch/src/payload.rs b/mutable_batch/src/payload.rs
index d22ce0a4e5..7cb85de5a6 100644
--- a/mutable_batch/src/payload.rs
+++ b/mutable_batch/src/payload.rs
@@ -44,6 +44,9 @@ impl<'a> PartitionWrite<'a> {
let time = get_time_column(batch);
let (min_timestamp, max_timestamp) = min_max_time(time);
+ // This `allow` can be removed when this issue is fixed and released:
+ // <https://github.com/rust-lang/rust-clippy/issues/11086>
+ #[allow(clippy::single_range_in_vec_init)]
Self {
batch,
ranges: vec![0..batch.row_count],
diff --git a/mutable_batch_lp/src/lib.rs b/mutable_batch_lp/src/lib.rs
index 630061d4fc..9d0c07c8ef 100644
--- a/mutable_batch_lp/src/lib.rs
+++ b/mutable_batch_lp/src/lib.rs
@@ -208,7 +208,7 @@ pub fn write_line(
// Only allocate the seen tags hashset if there are tags.
if let Some(tags) = &line.series.tag_set {
let mut seen = HashSet::with_capacity(tags.len());
- for (tag_key, tag_value) in tags.iter() {
+ for (tag_key, tag_value) in tags {
// Check if a field with this name has been observed previously.
if !seen.insert(tag_key) {
// This tag_key appears more than once, with differing values.
diff --git a/parquet_file/src/metadata.rs b/parquet_file/src/metadata.rs
index cda38edc7f..fc612e4b95 100644
--- a/parquet_file/src/metadata.rs
+++ b/parquet_file/src/metadata.rs
@@ -850,7 +850,7 @@ fn read_statistics_from_parquet_row_group(
}
fn combine_column_summaries(total: &mut Vec<ColumnSummary>, other: Vec<ColumnSummary>) {
- for col in total.iter_mut() {
+ for col in &mut *total {
if let Some(other_col) = other.iter().find(|c| c.name == col.name) {
col.update_from(other_col);
}
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs
index 78779384bf..16fa16d026 100644
--- a/predicate/src/lib.rs
+++ b/predicate/src/lib.rs
@@ -354,7 +354,7 @@ impl Predicate {
/// Adds all expressions to the list of general purpose predicates
pub fn with_exprs(mut self, filters: impl IntoIterator<Item = Expr>) -> Self {
- self.exprs.extend(filters.into_iter());
+ self.exprs.extend(filters);
self
}
}
diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs
index 794caba889..c3e2dafaa4 100644
--- a/querier/src/parquet/mod.rs
+++ b/querier/src/parquet/mod.rs
@@ -178,7 +178,7 @@ pub mod tests {
async fn new() -> Self {
let catalog = TestCatalog::new();
- let lp = vec![
+ let lp = [
"table,tag1=WA field_int=1000i 8000",
"table,tag1=VT field_int=10i 10000",
"table,tag1=UT field_int=70i 20000",
diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs
index 032174213a..8679d89e87 100644
--- a/querier/src/table/mod.rs
+++ b/querier/src/table/mod.rs
@@ -251,9 +251,10 @@ impl QuerierTable {
.await;
let Some(cached_table) = cached_namespace
.as_ref()
- .and_then(|ns| ns.tables.get(self.table_name.as_ref())) else {
- return Ok(vec![]);
- };
+ .and_then(|ns| ns.tables.get(self.table_name.as_ref()))
+ else {
+ return Ok(vec![]);
+ };
let cached_partitions = self
.fetch_cached_partitions(
cached_table,
@@ -432,18 +433,19 @@ impl QuerierTable {
let columns = self.schema.select_given_and_pk_columns(projection);
// get cached table w/o any must-coverage information
- let Some(cached_table) = self.chunk_adapter
+ let Some(cached_table) = self
+ .chunk_adapter
.catalog_cache()
.namespace()
.get(
Arc::clone(&self.namespace_name),
&[],
- span_recorder.child_span("get namespace")
+ span_recorder.child_span("get namespace"),
)
.await
.and_then(|ns| ns.tables.get(&self.table_name).cloned())
else {
- return Ok(vec![])
+ return Ok(vec![]);
};
// get any chunks from the ingester(s)
diff --git a/query_functions/src/coalesce_struct.rs b/query_functions/src/coalesce_struct.rs
index befe8572b1..0892e721e1 100644
--- a/query_functions/src/coalesce_struct.rs
+++ b/query_functions/src/coalesce_struct.rs
@@ -87,6 +87,7 @@ pub static COALESCE_STRUCT_UDF: Lazy<Arc<ScalarUDF>> = Lazy::new(|| {
});
let fun: ScalarFunctionImplementation = Arc::new(move |args: &[ColumnarValue]| {
+ #[allow(clippy::manual_try_fold)]
args.iter().enumerate().fold(Ok(None), |accu, (pos, arg)| {
let Some(accu) = accu? else {return Ok(Some(arg.clone()))};
diff --git a/query_functions/src/gapfill.rs b/query_functions/src/gapfill.rs
index 202004fe20..d49993870f 100644
--- a/query_functions/src/gapfill.rs
+++ b/query_functions/src/gapfill.rs
@@ -87,7 +87,7 @@ pub const INTERPOLATE_UDF_NAME: &str = "interpolate";
/// `HandleGapFill`.
pub(crate) static INTERPOLATE: Lazy<Arc<ScalarUDF>> = Lazy::new(|| {
let return_type_fn: ReturnTypeFunction = Arc::new(|args| Ok(Arc::new(args[0].clone())));
- let signatures = vec![
+ let signatures = [
InfluxFieldType::Float,
InfluxFieldType::Integer,
InfluxFieldType::UInteger,
diff --git a/query_functions/src/regex.rs b/query_functions/src/regex.rs
index 380312c386..f153a43214 100644
--- a/query_functions/src/regex.rs
+++ b/query_functions/src/regex.rs
@@ -357,22 +357,22 @@ mod test {
fn test_clean_non_meta_escapes() {
let cases = vec![
("", ""),
- (r#"\"#, r#"\"#),
- (r#"\\"#, r#"\\"#),
+ (r"\", r"\"),
+ (r"\\", r"\\"),
// : is not a special meta character
- (r#"\:"#, r#":"#),
+ (r"\:", r#":"#),
// . is a special meta character
- (r#"\."#, r#"\."#),
- (r#"foo\"#, r#"foo\"#),
- (r#"foo\\"#, r#"foo\\"#),
- (r#"foo\:"#, r#"foo:"#),
- (r#"foo\xff"#, r#"foo\xff"#),
- (r#"fo\\o"#, r#"fo\\o"#),
- (r#"fo\:o"#, r#"fo:o"#),
- (r#"fo\:o\x123"#, r#"fo:o\x123"#),
- (r#"fo\:o\x123\:"#, r#"fo:o\x123:"#),
- (r#"foo\\\:bar"#, r#"foo\\:bar"#),
- (r#"foo\\\:bar\\\:"#, r#"foo\\:bar\\:"#),
+ (r"\.", r"\."),
+ (r"foo\", r"foo\"),
+ (r"foo\\", r"foo\\"),
+ (r"foo\:", r#"foo:"#),
+ (r"foo\xff", r"foo\xff"),
+ (r"fo\\o", r"fo\\o"),
+ (r"fo\:o", r#"fo:o"#),
+ (r"fo\:o\x123", r"fo:o\x123"),
+ (r"fo\:o\x123\:", r"fo:o\x123:"),
+ (r"foo\\\:bar", r"foo\\:bar"),
+ (r"foo\\\:bar\\\:", r"foo\\:bar\\:"),
("foo", "foo"),
];
diff --git a/router/benches/namespace_schema_cache.rs b/router/benches/namespace_schema_cache.rs
index b382058e90..c9014d111f 100644
--- a/router/benches/namespace_schema_cache.rs
+++ b/router/benches/namespace_schema_cache.rs
@@ -71,9 +71,7 @@ fn bench_add_new_tables_with_columns(
b.iter_batched(
|| {
(
- init_ns_cache(
- [(ARBITRARY_NAMESPACE.clone(), initial_schema.clone())].into_iter(),
- ),
+ init_ns_cache([(ARBITRARY_NAMESPACE.clone(), initial_schema.clone())]),
ARBITRARY_NAMESPACE.clone(),
schema_update.clone(),
)
@@ -99,9 +97,7 @@ fn bench_add_columns_to_existing_table(
b.iter_batched(
|| {
(
- init_ns_cache(
- [(ARBITRARY_NAMESPACE.clone(), initial_schema.clone())].into_iter(),
- ),
+ init_ns_cache([(ARBITRARY_NAMESPACE.clone(), initial_schema.clone())]),
ARBITRARY_NAMESPACE.clone(),
schema_update.clone(),
)
diff --git a/router/src/server/http.rs b/router/src/server/http.rs
index 86d09ce00a..c42c75d3d5 100644
--- a/router/src/server/http.rs
+++ b/router/src/server/http.rs
@@ -1333,7 +1333,12 @@ mod tests {
),
(
- NonUtf8Body(std::str::from_utf8(&[0, 159]).unwrap_err()),
+ NonUtf8Body(
+ // The lint warns that this call will always error, which is what we want here in
+ // this test for errors
+ #[allow(invalid_from_utf8)]
+ std::str::from_utf8(&[0, 159]).unwrap_err()
+ ),
"body content is not valid utf8: invalid utf-8 sequence of 1 bytes from index 1",
),
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 95c46a2526..fbad157cd8 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,3 +1,3 @@
[toolchain]
-channel = "1.71"
+channel = "1.72"
components = [ "rustfmt", "clippy" ]
diff --git a/schema/src/sort.rs b/schema/src/sort.rs
index e4a2d199a5..06378a124f 100644
--- a/schema/src/sort.rs
+++ b/schema/src/sort.rs
@@ -186,7 +186,7 @@ impl SortKey {
// Go over short key and check its right-order availability in the long key
let mut prev_long_idx: Option<usize> = None;
- for (col, sort_options) in short_key.columns.iter() {
+ for (col, sort_options) in &*short_key.columns {
if let Some(long_idx) = long_key.find_index(col, sort_options) {
match prev_long_idx {
None => prev_long_idx = Some(long_idx),
diff --git a/service_grpc_flight/src/keep_alive.rs b/service_grpc_flight/src/keep_alive.rs
index e8d1ff0159..4d838e92d3 100644
--- a/service_grpc_flight/src/keep_alive.rs
+++ b/service_grpc_flight/src/keep_alive.rs
@@ -248,9 +248,7 @@ fn check_schema(schema: &Schema) -> bool {
/// This must only be sent AFTER a [`Schema`] was transmitted.
fn build_empty_batch_msg(schema: Option<&SchemaRef>) -> Option<FlightData> {
let Some(schema) = schema else {
- warn!(
- "cannot send keep-alive because no schema was transmitted yet",
- );
+ warn!("cannot send keep-alive because no schema was transmitted yet",);
return None;
};
@@ -324,7 +322,7 @@ mod tests {
let s = FlightRecordBatchStream::new_from_flight_data(s);
let batches: Vec<_> = s.try_collect().await.unwrap();
assert_batches_eq!(
- vec!["+---+", "| f |", "+---+", "| 1 |", "| 2 |", "| 3 |", "| 4 |", "| 5 |", "+---+",],
+ ["+---+", "| f |", "+---+", "| 1 |", "| 2 |", "| 3 |", "| 4 |", "| 5 |", "+---+"],
&batches
);
}
diff --git a/service_grpc_influxrpc/src/data.rs b/service_grpc_influxrpc/src/data.rs
index da9c8fcb15..70aa99b11c 100644
--- a/service_grpc_influxrpc/src/data.rs
+++ b/service_grpc_influxrpc/src/data.rs
@@ -165,7 +165,7 @@ fn series_to_frames(
data_type: data_type.into(),
})),
})
- .chain(data_frames.into_iter()),
+ .chain(data_frames),
)
}
diff --git a/service_grpc_influxrpc/src/expr.rs b/service_grpc_influxrpc/src/expr.rs
index e3de7f3dfd..6285fd8e5d 100644
--- a/service_grpc_influxrpc/src/expr.rs
+++ b/service_grpc_influxrpc/src/expr.rs
@@ -414,6 +414,7 @@ impl InListBuilder {
}
// lhs OR rhs
else if Some(RPCValue::Logical(RPCLogical::Or as i32)) == node.value {
+ #[allow(clippy::manual_try_fold)]
node.children
.iter()
.fold(Ok(self), |res, node| res.and_then(|this| this.append(node)))
|
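The toolchain bump to 1.72 in the diff above mostly simplifies raw strings and adds `#[allow(clippy::manual_try_fold)]` attributes rather than rewriting the affected folds. As a point of comparison only, here is a minimal, hypothetical sketch of the two equivalent forms that lint distinguishes; it is not code from this repository.

```rust
// Hypothetical example, not repository code: clippy::manual_try_fold flags a
// fold that threads a Result through its accumulator and suggests try_fold.
fn sum_parsed(items: &[&str]) -> Result<i64, std::num::ParseIntError> {
    // Manual form (the shape the allow-attributes in the diff keep as-is):
    let _manual: Result<i64, _> = items
        .iter()
        .fold(Ok(0), |acc, s| acc.and_then(|a| s.parse::<i64>().map(|v| a + v)));

    // Short-circuiting form the lint suggests:
    items.iter().try_fold(0i64, |a, s| s.parse::<i64>().map(|v| a + v))
}

fn main() {
    assert_eq!(sum_parsed(&["1", "2", "3"]).unwrap(), 6);
    assert!(sum_parsed(&["1", "x"]).is_err());
}
```

Both forms stop at the first parse error; the `try_fold` version simply makes the short-circuiting explicit.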
d71f023a5773719fd4e356d9cc1e8a8597f824e2
|
Dom Dwyer
|
2022-11-08 14:54:48
|
inline helpers
|
Inline the hash generation & key comparator.
| null |
refactor: inline helpers
Inline the hash generation & key comparator.
|
diff --git a/ingester/src/arcmap.rs b/ingester/src/arcmap.rs
index d6b7ca6975..a809b2b2aa 100644
--- a/ingester/src/arcmap.rs
+++ b/ingester/src/arcmap.rs
@@ -172,12 +172,14 @@ where
self.map.read().values().map(Arc::clone).collect()
}
+ #[inline]
fn compute_hash<Q: Hash + ?Sized>(&self, key: &Q) -> u64 {
let mut state = self.hasher.build_hasher();
key.hash(&mut state);
state.finish()
}
+ #[inline]
fn key_equal<Q>(q: &Q) -> impl FnMut(&'_ K) -> bool + '_
where
K: Borrow<Q>,
|
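The commit above marks the map's hash and key-comparison helpers `#[inline]`. A self-contained sketch of the same hashing shape, using only the standard library's `BuildHasher`/`Hasher` traits (the `ArcMap` type itself is not reproduced here, and `RandomState` is assumed purely for illustration):

```rust
// Standalone sketch of an inlinable hash helper; not the ingester's code.
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

#[inline]
fn compute_hash<Q: Hash + ?Sized, S: BuildHasher>(build: &S, key: &Q) -> u64 {
    let mut state = build.build_hasher();
    key.hash(&mut state);
    state.finish()
}

fn main() {
    let build = RandomState::new();
    // The same key hashed with the same BuildHasher instance yields the same value.
    assert_eq!(compute_hash(&build, "bananas"), compute_hash(&build, "bananas"));
    println!("hash = {}", compute_hash(&build, "bananas"));
}
```

`#[inline]` is a hint to the compiler rather than a guarantee, which is why it is a reasonable fit for tiny helpers like these.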
dd54d8b7feeec9b6c8e308415a00d906ffcfd1ae
|
Armin Primadi
|
2023-04-19 15:27:38
|
Garbage collector hangs indefinitely on shutdown (#7567)
|
* fix: Garbage collector hangs indefinitely on shutdown
* style(garbage_collector): conform to linter and fmt
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
fix: Garbage collector hangs indefinitely on shutdown (#7567)
* fix: Garbage collector hangs indefinitely on shutdown
* style(garbage_collector): conform to linter and fmt
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index d5b51f5010..429301b1a3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2033,6 +2033,7 @@ dependencies = [
name = "garbage_collector"
version = "0.1.0"
dependencies = [
+ "bytes",
"chrono",
"clap 4.2.2",
"clap_blocks",
diff --git a/garbage_collector/Cargo.toml b/garbage_collector/Cargo.toml
index aae3302d88..cdebc0c8ef 100644
--- a/garbage_collector/Cargo.toml
+++ b/garbage_collector/Cargo.toml
@@ -22,6 +22,7 @@ tokio-util = { version = "0.7.7" }
uuid = { version = "1", features = ["v4"] }
[dev-dependencies]
+bytes = "1.4"
data_types = { path = "../data_types" }
filetime = "0.2"
metric = { path = "../metric" }
diff --git a/garbage_collector/src/lib.rs b/garbage_collector/src/lib.rs
index 7dba67642e..b9c7114870 100644
--- a/garbage_collector/src/lib.rs
+++ b/garbage_collector/src/lib.rs
@@ -103,6 +103,7 @@ impl GarbageCollector {
sub_config.objectstore_sleep_interval_minutes,
));
let os_checker = tokio::spawn(os_checker::perform(
+ shutdown.clone(),
Arc::clone(&catalog),
chrono::Duration::from_std(sub_config.objectstore_cutoff).map_err(|e| {
Error::CutoffError {
@@ -113,6 +114,7 @@ impl GarbageCollector {
tx2,
));
let os_deleter = tokio::spawn(os_deleter::perform(
+ shutdown.clone(),
object_store,
dry_run,
sub_config.objectstore_concurrent_deletes,
diff --git a/garbage_collector/src/objectstore/checker.rs b/garbage_collector/src/objectstore/checker.rs
index be4dd7e020..f26e1dbda1 100644
--- a/garbage_collector/src/objectstore/checker.rs
+++ b/garbage_collector/src/objectstore/checker.rs
@@ -5,6 +5,7 @@ use observability_deps::tracing::*;
use snafu::prelude::*;
use std::sync::Arc;
use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
#[derive(Debug, Snafu)]
#[allow(missing_docs)]
@@ -12,6 +13,9 @@ pub enum Error {
#[snafu(display("Expected a file name"))]
FileNameMissing,
+ #[snafu(display("Channel closed unexpectedly"))]
+ ChannelClosed,
+
#[snafu(display("The catalog could not be queried for {object_store_id}"))]
GetFile {
source: iox_catalog::interface::Error,
@@ -27,6 +31,7 @@ pub enum Error {
pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
pub(crate) async fn perform(
+ shutdown: CancellationToken,
catalog: Arc<dyn Catalog>,
cutoff: Duration,
mut items: mpsc::Receiver<ObjectMeta>,
@@ -35,10 +40,26 @@ pub(crate) async fn perform(
let mut repositories = catalog.repositories().await;
let parquet_files = repositories.parquet_files();
- while let Some(item) = items.recv().await {
- let older_than = chrono::offset::Utc::now() - cutoff;
- if should_delete(&item, older_than, parquet_files).await? {
- deleter.send(item).await.context(DeleterExitedSnafu)?;
+ loop {
+ tokio::select! {
+ _ = shutdown.cancelled() => {
+ // Exit gracefully
+ break;
+ }
+ res = items.recv() => {
+ match res {
+ Some(item) => {
+ let older_than = chrono::offset::Utc::now() - cutoff;
+ if should_delete(&item, older_than, parquet_files).await? {
+ deleter.send(item).await.context(DeleterExitedSnafu)?;
+ }
+ }
+ None => {
+ // The channel has been closed unexpectedly
+ return Err(Error::ChannelClosed);
+ }
+ }
+ }
}
}
diff --git a/garbage_collector/src/objectstore/deleter.rs b/garbage_collector/src/objectstore/deleter.rs
index 6cc39aa118..a1255ca426 100644
--- a/garbage_collector/src/objectstore/deleter.rs
+++ b/garbage_collector/src/objectstore/deleter.rs
@@ -4,14 +4,16 @@ use observability_deps::tracing::info;
use snafu::prelude::*;
use std::sync::Arc;
use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
pub(crate) async fn perform(
+ shutdown: CancellationToken,
object_store: Arc<DynObjectStore>,
dry_run: bool,
concurrent_deletes: usize,
items: mpsc::Receiver<ObjectMeta>,
) -> Result<()> {
- tokio_stream::wrappers::ReceiverStream::new(items)
+ let stream_fu = tokio_stream::wrappers::ReceiverStream::new(items)
.map(|item| {
let object_store = Arc::clone(&object_store);
@@ -30,8 +32,17 @@ pub(crate) async fn perform(
}
})
.buffer_unordered(concurrent_deletes)
- .try_collect()
- .await?;
+ .try_collect();
+
+ tokio::select! {
+ _ = shutdown.cancelled() => {
+ // Exit gracefully
+ }
+ res = stream_fu => {
+ // Propagate error
+ res?;
+ }
+ }
Ok(())
}
@@ -47,3 +58,98 @@ pub enum Error {
}
pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use bytes::Bytes;
+ use chrono::Utc;
+ use data_types::{NamespaceId, PartitionId, ShardId, TableId};
+ use object_store::path::Path;
+ use parquet_file::ParquetFilePath;
+ use std::time::Duration;
+ use uuid::Uuid;
+
+ #[tokio::test]
+ async fn perform_shutdown_gracefully() {
+ let shutdown = CancellationToken::new();
+ let nitems = 3;
+ let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
+ let items = populate_os_with_items(&object_store, nitems).await;
+
+ assert_eq!(count_os_element(&object_store).await, nitems);
+
+ let dry_run = false;
+ let concurrent_deletes = 2;
+ let (tx, rx) = mpsc::channel(1000);
+
+ tokio::spawn({
+ let shutdown = shutdown.clone();
+
+ async move {
+ for item in items {
+ tx.send(item.clone()).await.unwrap();
+ }
+
+ // Send a shutdown signal
+ shutdown.cancel();
+
+ // Prevent this thread from exiting. Exiting this thread will
+ // close the channel, which in turns close the processing stream.
+ loop {
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+ }
+ }
+ });
+
+ // This call should terminate because we send shutdown signal, but
+ // nothing can be said about the number of elements in object store.
+ // The processing stream may or may not have chance to process the
+ // items for deletion.
+ let perform_fu = perform(
+ shutdown,
+ Arc::clone(&object_store),
+ dry_run,
+ concurrent_deletes,
+ rx,
+ );
+ // Unusual test because there is no assertion but the call below should
+ // not panic which verifies that the deleter task shutdown gracefully.
+ tokio::time::timeout(Duration::from_secs(3), perform_fu)
+ .await
+ .unwrap()
+ .unwrap();
+ }
+
+ async fn count_os_element(os: &Arc<DynObjectStore>) -> usize {
+ let objects = os.list(None).await.unwrap();
+ objects.fold(0, |acc, _| async move { acc + 1 }).await
+ }
+
+ async fn populate_os_with_items(os: &Arc<DynObjectStore>, nitems: usize) -> Vec<ObjectMeta> {
+ let mut items = vec![];
+ for i in 0..nitems {
+ let object_meta = ObjectMeta {
+ location: new_object_meta_location(),
+ last_modified: Utc::now(),
+ size: 0,
+ };
+ os.put(&object_meta.location, Bytes::from(i.to_string()))
+ .await
+ .unwrap();
+ items.push(object_meta);
+ }
+ items
+ }
+
+ fn new_object_meta_location() -> Path {
+ ParquetFilePath::new(
+ NamespaceId::new(1),
+ TableId::new(2),
+ ShardId::new(3),
+ PartitionId::new(4),
+ Uuid::new_v4(),
+ )
+ .object_store_path()
+ }
+}
diff --git a/garbage_collector/src/objectstore/lister.rs b/garbage_collector/src/objectstore/lister.rs
index 1d3f0907d1..eec5cac241 100644
--- a/garbage_collector/src/objectstore/lister.rs
+++ b/garbage_collector/src/objectstore/lister.rs
@@ -29,9 +29,14 @@ pub(crate) async fn perform(
None => {
// sleep for the configured time, then list again and go around the loop
// again
- sleep(Duration::from_secs(60 * sleep_interval_minutes)).await;
- items = object_store.list(None).await.context(ListingSnafu)?;
- continue;
+ select! {
+ _ = shutdown.cancelled() => {
+ break;
+ }
+ _ = sleep(Duration::from_secs(60 * sleep_interval_minutes)) => {
+ items = object_store.list(None).await.context(ListingSnafu)?;
+ }
+ }
}
}
}
|
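The fix above threads a `CancellationToken` into the checker and deleter loops so they stop waiting on their channels once shutdown is requested. A minimal sketch of that select-on-cancellation pattern, assuming only the public `tokio` (with the `macros`/`rt` features) and `tokio-util` APIs, with a print standing in for the real per-item work:

```rust
// Standalone sketch, not the garbage collector's code: a worker loop that
// drains a channel until either a shutdown is requested or the sender is gone.
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

async fn drain_until_shutdown(shutdown: CancellationToken, mut items: mpsc::Receiver<String>) {
    loop {
        tokio::select! {
            // Exit gracefully on shutdown instead of waiting on recv() forever.
            _ = shutdown.cancelled() => break,
            res = items.recv() => match res {
                Some(item) => println!("processing {item}"),
                // Sender dropped: nothing more will arrive.
                None => break,
            },
        }
    }
}

#[tokio::main]
async fn main() {
    let shutdown = CancellationToken::new();
    let (tx, rx) = mpsc::channel(8);
    let worker = tokio::spawn(drain_until_shutdown(shutdown.clone(), rx));

    tx.send("parquet-file-1".to_string()).await.unwrap();
    shutdown.cancel(); // request shutdown; the worker exits rather than hanging
    worker.await.unwrap();
}
```

The key property, as in the commit, is that cancellation races the pending `recv()`, so a quiet channel can no longer keep the task alive indefinitely.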
e49ffc02f8e51dba70ca2a10cd0d29ee88da3d3c
|
Marco Neumann
|
2022-12-12 15:32:04
|
faster sort key calculation (#6375)
|
Avoid nasty string lookups to determine which columns make a parquet's

sort key.
For #6358.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor: faster sort key calculation (#6375)
Avoid nasty string lookups to determine which columns make a parquet's
sort key.
For #6358.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/querier/src/cache/partition.rs b/querier/src/cache/partition.rs
index f8ec06c6e5..3f3799e87e 100644
--- a/querier/src/cache/partition.rs
+++ b/querier/src/cache/partition.rs
@@ -180,13 +180,14 @@ impl CachedPartition {
pub struct PartitionSortKey {
pub sort_key: Arc<SortKey>,
pub column_set: HashSet<ColumnId>,
+ pub column_order: Vec<ColumnId>,
}
impl PartitionSortKey {
fn new(sort_key: SortKey, column_id_map_rev: &HashMap<Arc<str>, ColumnId>) -> Self {
let sort_key = Arc::new(sort_key);
- let mut column_set: HashSet<ColumnId> = sort_key
+ let mut column_order: Vec<ColumnId> = sort_key
.iter()
.map(|(name, _opts)| {
*column_id_map_rev
@@ -194,11 +195,15 @@ impl PartitionSortKey {
.unwrap_or_else(|| panic!("column_id_map_rev misses data: {name}"))
})
.collect();
+ column_order.shrink_to_fit();
+
+ let mut column_set: HashSet<ColumnId> = column_order.iter().copied().collect();
column_set.shrink_to_fit();
Self {
sort_key,
column_set,
+ column_order,
}
}
@@ -207,6 +212,7 @@ impl PartitionSortKey {
size_of_val(self)
+ self.sort_key.as_ref().size()
+ (self.column_set.capacity() * size_of::<ColumnId>())
+ + (self.column_order.capacity() * size_of::<ColumnId>())
}
}
@@ -321,6 +327,7 @@ mod tests {
&PartitionSortKey {
sort_key: Arc::new(p1.sort_key().unwrap()),
column_set: HashSet::from([c1.column.id, c2.column.id]),
+ column_order: vec![c1.column.id, c2.column.id],
}
);
assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 1);
@@ -485,6 +492,7 @@ mod tests {
&PartitionSortKey {
sort_key: Arc::new(p_sort_key.clone().unwrap()),
column_set: HashSet::from([c1.column.id, c2.column.id]),
+ column_order: vec![c1.column.id, c2.column.id],
}
);
assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 3);
diff --git a/querier/src/chunk/mod.rs b/querier/src/chunk/mod.rs
index be8c5d108a..036c7d99ef 100644
--- a/querier/src/chunk/mod.rs
+++ b/querier/src/chunk/mod.rs
@@ -247,9 +247,6 @@ impl ChunkAdapter {
span_recorder.child_span("cache GET partition sort key"),
)
.await
- .map(|sort_key| Arc::clone(&sort_key.sort_key));
- let partition_sort_key_ref = partition_sort_key.as_ref();
- let partition_sort_key_ref = partition_sort_key_ref
.expect("partition sort key should be set when a parquet file exists");
// NOTE: Because we've looked up the sort key AFTER the namespace schema, it may contain columns for which we
@@ -271,15 +268,21 @@ impl ChunkAdapter {
.catalog_cache
.projected_schema()
.get(
- cached_table,
+ Arc::clone(&cached_table),
column_ids,
span_recorder.child_span("cache GET projected schema"),
)
.await;
// calculate sort key
- let pk_cols = schema.primary_key();
- let sort_key = partition_sort_key_ref.filter_to(&pk_cols, parquet_file.partition_id.get());
+ let sort_key = SortKey::from_columns(
+ partition_sort_key
+ .column_order
+ .iter()
+ .filter(|c_id| parquet_file_cols.contains(c_id))
+ .filter_map(|c_id| cached_table.column_id_map.get(c_id))
+ .cloned(),
+ );
assert!(
!sort_key.is_empty(),
"Sort key can never be empty because there should at least be a time column",
@@ -303,7 +306,7 @@ impl ChunkAdapter {
Some(ChunkParts {
meta,
schema,
- partition_sort_key,
+ partition_sort_key: Some(Arc::clone(&partition_sort_key.sort_key)),
})
}
}
|
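The change above stores the partition sort key's column order as column IDs so the per-file sort key can be derived by ID filtering instead of name lookups. A small, hypothetical sketch of that filtering step (the types and names here are illustrative, not the querier's):

```rust
// Hypothetical types and names, not the querier's: build a file's sort key by
// walking the partition's column-ID order and keeping the IDs the file has.
use std::collections::{HashMap, HashSet};

type ColumnId = i64;

fn file_sort_key(
    partition_column_order: &[ColumnId],
    file_columns: &HashSet<ColumnId>,
    id_to_name: &HashMap<ColumnId, String>,
) -> Vec<String> {
    partition_column_order
        .iter()
        .copied()
        .filter(|id| file_columns.contains(id))
        .filter_map(|id| id_to_name.get(&id).cloned())
        .collect()
}

fn main() {
    let order = vec![1, 2, 3]; // partition sort key order, by column ID
    let file_cols: HashSet<ColumnId> = [1, 3].into_iter().collect();
    let names: HashMap<ColumnId, String> = [
        (1, "tag1".to_string()),
        (2, "tag2".to_string()),
        (3, "time".to_string()),
    ]
    .into_iter()
    .collect();

    // Only columns present in the file survive, in partition-sort-key order.
    assert_eq!(file_sort_key(&order, &file_cols, &names), ["tag1", "time"]);
}
```

Filtering integer IDs against a `HashSet` sidesteps the per-column string comparisons the commit body calls out.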
6fb93a767925b3f10ffc5c977de70721b4cffe1a
|
Stuart Carnie
|
2023-02-23 06:33:49
|
Make InfluxQL planning sync (#7038)
|
* refactor: Move statement parsing to separate fn
* refactor: Remove async from `InfluxQLToLogicalPlan`
Closes #6607
* chore: Remove async functions and tokio::test
* chore: Remove redundant attribute
* chore: Feedback, switch to dynamic dispatch vs generic implementation
| null |
refactor: Make InfluxQL planning sync (#7038)
* refactor: Move statement parsing to separate fn
* refactor: Remove async from `InfluxQLToLogicalPlan`
Closes #6607
* chore: Remove async functions and tokio::test
* chore: Remove redundant attribute
* chore: Feedback, switch to dynamic dispatch vs generic implementation
|
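Before the diff itself: the refactor below introduces a `SchemaProvider` trait and takes it as `&dyn SchemaProvider` ("dynamic dispatch vs generic implementation" in the notes above). A toy sketch of that trade-off, using hypothetical names rather than the planner's real trait:

```rust
// Toy sketch with hypothetical names, not the planner's trait: the same call
// written against a trait object (&dyn) and against a generic bound.
trait SchemaProvider {
    fn table_names(&self) -> Vec<&str>;
}

struct InMemoryProvider {
    names: Vec<String>,
}

impl SchemaProvider for InMemoryProvider {
    fn table_names(&self) -> Vec<&str> {
        self.names.iter().map(|s| s.as_str()).collect()
    }
}

// Dynamic dispatch: one compiled copy; the call goes through a vtable.
fn count_tables_dyn(s: &dyn SchemaProvider) -> usize {
    s.table_names().len()
}

// Generic bound: monomorphized per concrete type; the call is resolved statically.
fn count_tables_generic<S: SchemaProvider>(s: &S) -> usize {
    s.table_names().len()
}

fn main() {
    let p = InMemoryProvider {
        names: vec!["cpu".into(), "mem".into()],
    };
    assert_eq!(count_tables_dyn(&p), 2);
    assert_eq!(count_tables_generic(&p), 2);
}
```

Taking `&dyn SchemaProvider` keeps the planner's signature free of type parameters, at the cost of virtual calls into the provider.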
diff --git a/iox_query/src/frontend/influxql.rs b/iox_query/src/frontend/influxql.rs
index 4f3dddb3e9..e6e0503e9d 100644
--- a/iox_query/src/frontend/influxql.rs
+++ b/iox_query/src/frontend/influxql.rs
@@ -1,14 +1,47 @@
+use std::collections::{HashMap, HashSet};
+use std::ops::Deref;
use std::sync::Arc;
use crate::exec::context::IOxSessionContext;
-use crate::plan::influxql::InfluxQLToLogicalPlan;
-use crate::QueryNamespace;
+use crate::plan::influxql;
+use crate::plan::influxql::{InfluxQLToLogicalPlan, SchemaProvider};
+use datafusion::datasource::provider_as_source;
+use datafusion::logical_expr::{LogicalPlan, TableSource};
use datafusion::{
error::{DataFusionError, Result},
physical_plan::ExecutionPlan,
};
+use influxdb_influxql_parser::common::MeasurementName;
use influxdb_influxql_parser::parse_statements;
+use influxdb_influxql_parser::statement::Statement;
+use influxdb_influxql_parser::visit::{Visitable, Visitor};
use observability_deps::tracing::debug;
+use schema::Schema;
+
+struct ContextSchemaProvider {
+ tables: HashMap<String, (Arc<dyn TableSource>, Schema)>,
+}
+
+impl SchemaProvider for ContextSchemaProvider {
+ fn get_table_provider(&self, name: &str) -> Result<Arc<dyn TableSource>> {
+ self.tables
+ .get(name)
+ .map(|(t, _)| Arc::clone(t))
+ .ok_or_else(|| DataFusionError::Plan(format!("measurement does not exist: {name}")))
+ }
+
+ fn table_names(&self) -> Vec<&'_ str> {
+ self.tables.keys().map(|k| k.as_str()).collect::<Vec<_>>()
+ }
+
+ fn table_exists(&self, name: &str) -> bool {
+ self.tables.contains_key(name)
+ }
+
+ fn table_schema(&self, name: &str) -> Option<Schema> {
+ self.tables.get(name).map(|(_, s)| s.clone())
+ }
+}
/// This struct can create plans for running SQL queries against databases
#[derive(Debug, Default)]
@@ -23,13 +56,71 @@ impl InfluxQLQueryPlanner {
/// DataFusion physical execution plan that runs on the query executor.
pub async fn query(
&self,
- database: Arc<dyn QueryNamespace>,
query: &str,
ctx: &IOxSessionContext,
) -> Result<Arc<dyn ExecutionPlan>> {
let ctx = ctx.child_ctx("query");
debug!(text=%query, "planning InfluxQL query");
+ let statement = self.query_to_statement(query)?;
+ let logical_plan = self.statement_to_plan(statement, &ctx).await?;
+
+ // This would only work for SELECT statements at the moment, as the schema queries do
+ // not return ExecutionPlan
+ ctx.create_physical_plan(&logical_plan).await
+ }
+
+ async fn statement_to_plan(
+ &self,
+ statement: Statement,
+ ctx: &IOxSessionContext,
+ ) -> Result<LogicalPlan> {
+ use std::collections::hash_map::Entry;
+
+ let session_cfg = ctx.inner().copied_config();
+ let cfg = session_cfg.config_options();
+ let schema = ctx
+ .inner()
+ .catalog(&cfg.catalog.default_catalog)
+ .ok_or_else(|| {
+ DataFusionError::Plan(format!(
+ "failed to resolve catalog: {}",
+ cfg.catalog.default_catalog
+ ))
+ })?
+ .schema(&cfg.catalog.default_schema)
+ .ok_or_else(|| {
+ DataFusionError::Plan(format!(
+ "failed to resolve schema: {}",
+ cfg.catalog.default_schema
+ ))
+ })?;
+ let names = schema.table_names();
+ let query_tables = find_all_measurements(&statement, &names)?;
+
+ let mut sp = ContextSchemaProvider {
+ tables: HashMap::with_capacity(query_tables.len()),
+ };
+
+ for table_name in &query_tables {
+ if let Entry::Vacant(v) = sp.tables.entry(table_name.to_string()) {
+ if let Some(table) = schema.table(table_name).await {
+ let schema = Schema::try_from(table.schema())
+ .map_err(|err| {
+ DataFusionError::Internal(format!("unable to convert DataFusion schema for measurement {table_name} to IOx schema: {err}"))
+ })?;
+ v.insert((provider_as_source(table), schema));
+ }
+ }
+ }
+
+ let planner = InfluxQLToLogicalPlan::new(&sp);
+ let logical_plan = planner.statement_to_plan(statement)?;
+ debug!(plan=%logical_plan.display_graphviz(), "logical plan");
+ Ok(logical_plan)
+ }
+
+ fn query_to_statement(&self, query: &str) -> Result<Statement> {
let mut statements = parse_statements(query)
.map_err(|e| DataFusionError::External(format!("{e}").into()))?;
@@ -39,12 +130,102 @@ impl InfluxQLQueryPlanner {
));
}
- let planner = InfluxQLToLogicalPlan::new(&ctx, database);
- let logical_plan = planner.statement_to_plan(statements.pop().unwrap()).await?;
- debug!(plan=%logical_plan.display_graphviz(), "logical plan");
+ Ok(statements.pop().unwrap())
+ }
+}
- // This would only work for SELECT statements at the moment, as the schema queries do
- // not return ExecutionPlan
- ctx.create_physical_plan(&logical_plan).await
+fn find_all_measurements(stmt: &Statement, tables: &[String]) -> Result<HashSet<String>> {
+ struct Matcher<'a>(&'a mut HashSet<String>, &'a [String]);
+ impl<'a> Visitor for Matcher<'a> {
+ type Error = DataFusionError;
+
+ fn post_visit_measurement_name(
+ self,
+ mn: &MeasurementName,
+ ) -> std::result::Result<Self, Self::Error> {
+ match mn {
+ MeasurementName::Name(name) => {
+ let name = name.deref();
+ if self.1.contains(name) {
+ self.0.insert(name.to_string());
+ }
+ }
+ MeasurementName::Regex(re) => {
+ let re = influxql::parse_regex(re)?;
+
+ self.1
+ .iter()
+ .filter(|table| re.is_match(table))
+ .for_each(|table| {
+ self.0.insert(table.into());
+ });
+ }
+ }
+
+ Ok(self)
+ }
+ }
+
+ let mut m = HashSet::new();
+ let vis = Matcher(&mut m, tables);
+ stmt.accept(vis)?;
+
+ Ok(m)
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use itertools::Itertools;
+ use test_helpers::assert_error;
+
+ #[test]
+ fn test_query_to_statement() {
+ let p = InfluxQLQueryPlanner::new();
+
+ // succeeds for a single statement
+ let _ = p.query_to_statement("SELECT foo FROM bar").unwrap();
+
+ // Fallible
+
+ assert_error!(
+ p.query_to_statement("SELECT foo FROM bar; SELECT bar FROM foo"),
+ DataFusionError::NotImplemented(ref s) if s == "The context currently only supports a single InfluxQL statement"
+ );
+ }
+
+ #[test]
+ fn test_find_all_measurements() {
+ fn find(q: &str) -> Vec<String> {
+ let p = InfluxQLQueryPlanner::new();
+ let s = p.query_to_statement(q).unwrap();
+ let tables = vec!["foo".into(), "bar".into(), "foobar".into()];
+ let res = find_all_measurements(&s, &tables).unwrap();
+ res.into_iter().sorted().collect()
+ }
+
+ assert_eq!(find("SELECT * FROM foo"), vec!["foo"]);
+ assert_eq!(find("SELECT * FROM foo, foo"), vec!["foo"]);
+ assert_eq!(find("SELECT * FROM foo, bar"), vec!["bar", "foo"]);
+ assert_eq!(find("SELECT * FROM foo, none"), vec!["foo"]);
+ assert_eq!(find("SELECT * FROM /^foo/"), vec!["foo", "foobar"]);
+ assert_eq!(find("SELECT * FROM foo, /^bar/"), vec!["bar", "foo"]);
+ assert_eq!(find("SELECT * FROM //"), vec!["bar", "foo", "foobar"]);
+
+ // Find all measurements in subqueries
+ assert_eq!(
+ find("SELECT * FROM foo, (SELECT * FROM bar)"),
+ vec!["bar", "foo"]
+ );
+ assert_eq!(
+ find("SELECT * FROM foo, (SELECT * FROM /bar/)"),
+ vec!["bar", "foo", "foobar"]
+ );
+
+ // Finds no measurements
+ assert!(find("SELECT * FROM none").is_empty());
+ assert!(find("SELECT * FROM (SELECT * FROM none)").is_empty());
+ assert!(find("SELECT * FROM /^l/").is_empty());
+ assert!(find("SELECT * FROM (SELECT * FROM /^l/)").is_empty());
}
}
diff --git a/iox_query/src/plan/influxql.rs b/iox_query/src/plan/influxql.rs
index cd108b1960..f1cf7f7cef 100644
--- a/iox_query/src/plan/influxql.rs
+++ b/iox_query/src/plan/influxql.rs
@@ -11,3 +11,5 @@ mod util;
mod var_ref;
pub use planner::InfluxQLToLogicalPlan;
+pub use planner::SchemaProvider;
+pub use util::parse_regex;
diff --git a/iox_query/src/plan/influxql/expr_type_evaluator.rs b/iox_query/src/plan/influxql/expr_type_evaluator.rs
index d4f9d83fbe..89bc51ba25 100644
--- a/iox_query/src/plan/influxql/expr_type_evaluator.rs
+++ b/iox_query/src/plan/influxql/expr_type_evaluator.rs
@@ -1,32 +1,32 @@
use crate::plan::influxql::field::field_by_name;
use crate::plan::influxql::field_mapper::map_type;
+use crate::plan::influxql::SchemaProvider;
use datafusion::common::{DataFusionError, Result};
use influxdb_influxql_parser::common::{MeasurementName, QualifiedMeasurementName};
use influxdb_influxql_parser::expression::{Expr, VarRefDataType};
use influxdb_influxql_parser::literal::Literal;
use influxdb_influxql_parser::select::{Dimension, FromMeasurementClause, MeasurementSelection};
use itertools::Itertools;
-use predicate::rpc_predicate::QueryNamespaceMeta;
/// Evaluate the type of the specified expression.
///
/// Derived from [Go implementation](https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4796-L4797).
pub(crate) fn evaluate_type(
- namespace: &dyn QueryNamespaceMeta,
+ s: &dyn SchemaProvider,
expr: &Expr,
from: &FromMeasurementClause,
) -> Result<Option<VarRefDataType>> {
- TypeEvaluator::new(from, namespace).eval_type(expr)
+ TypeEvaluator::new(from, s).eval_type(expr)
}
struct TypeEvaluator<'a> {
- namespace: &'a dyn QueryNamespaceMeta,
+ s: &'a dyn SchemaProvider,
from: &'a FromMeasurementClause,
}
impl<'a> TypeEvaluator<'a> {
- fn new(from: &'a FromMeasurementClause, namespace: &'a dyn QueryNamespaceMeta) -> Self {
- Self { from, namespace }
+ fn new(from: &'a FromMeasurementClause, s: &'a dyn SchemaProvider) -> Self {
+ Self { from, s }
}
fn eval_type(&self, expr: &Expr) -> Result<Option<VarRefDataType>> {
@@ -79,7 +79,7 @@ impl<'a> TypeEvaluator<'a> {
MeasurementSelection::Name(QualifiedMeasurementName {
name: MeasurementName::Name(ident),
..
- }) => match (data_type, map_type(self.namespace, ident.as_str(), name)?) {
+ }) => match (data_type, map_type(self.s, ident.as_str(), name)?) {
(Some(existing), Some(res)) => {
if res < existing {
data_type = Some(res)
@@ -91,10 +91,8 @@ impl<'a> TypeEvaluator<'a> {
MeasurementSelection::Subquery(select) => {
// find the field by name
if let Some(field) = field_by_name(select, name) {
- match (
- data_type,
- evaluate_type(self.namespace, &field.expr, &select.from)?,
- ) {
+ match (data_type, evaluate_type(self.s, &field.expr, &select.from)?)
+ {
(Some(existing), Some(res)) => {
if res < existing {
data_type = Some(res)
@@ -151,13 +149,13 @@ impl<'a> TypeEvaluator<'a> {
#[cfg(test)]
mod test {
use crate::plan::influxql::expr_type_evaluator::evaluate_type;
- use crate::plan::influxql::test_utils::{parse_select, MockNamespace};
+ use crate::plan::influxql::test_utils::{parse_select, MockSchemaProvider};
use assert_matches::assert_matches;
use influxdb_influxql_parser::expression::VarRefDataType;
#[test]
fn test_evaluate_type() {
- let namespace = MockNamespace::default();
+ let namespace = MockSchemaProvider::default();
let stmt = parse_select("SELECT shared_field0 FROM temp_01");
let field = stmt.fields.head().unwrap();
diff --git a/iox_query/src/plan/influxql/field_mapper.rs b/iox_query/src/plan/influxql/field_mapper.rs
index 6a40108ea2..5fdcb64b8a 100644
--- a/iox_query/src/plan/influxql/field_mapper.rs
+++ b/iox_query/src/plan/influxql/field_mapper.rs
@@ -1,9 +1,9 @@
#![allow(dead_code)]
use crate::plan::influxql::var_ref::field_type_to_var_ref_data_type;
+use crate::plan::influxql::SchemaProvider;
use datafusion::common::Result;
use influxdb_influxql_parser::expression::VarRefDataType;
-use predicate::rpc_predicate::QueryNamespaceMeta;
use schema::InfluxColumnType;
use std::collections::{HashMap, HashSet};
@@ -11,10 +11,10 @@ pub(crate) type FieldTypeMap = HashMap<String, VarRefDataType>;
pub(crate) type TagSet = HashSet<String>;
pub(crate) fn field_and_dimensions(
- namespace: &dyn QueryNamespaceMeta,
+ s: &dyn SchemaProvider,
name: &str,
) -> Result<Option<(FieldTypeMap, TagSet)>> {
- match namespace.table_schema(name) {
+ match s.table_schema(name) {
Some(iox) => Ok(Some((
FieldTypeMap::from_iter(iox.iter().filter_map(|(col_type, f)| match col_type {
InfluxColumnType::Field(ft) => {
@@ -31,11 +31,11 @@ pub(crate) fn field_and_dimensions(
}
pub(crate) fn map_type(
- namespace: &dyn QueryNamespaceMeta,
+ s: &dyn SchemaProvider,
measurement_name: &str,
field: &str,
) -> Result<Option<VarRefDataType>> {
- match namespace.table_schema(measurement_name) {
+ match s.table_schema(measurement_name) {
Some(iox) => Ok(match iox.find_index_of(field) {
Some(i) => match iox.field(i).0 {
InfluxColumnType::Field(ft) => Some(field_type_to_var_ref_data_type(ft)),
@@ -51,12 +51,12 @@ pub(crate) fn map_type(
#[cfg(test)]
mod test {
use super::*;
- use crate::plan::influxql::test_utils::MockNamespace;
+ use crate::plan::influxql::test_utils::MockSchemaProvider;
use assert_matches::assert_matches;
#[test]
fn test_schema_field_mapper() {
- let namespace = MockNamespace::default();
+ let namespace = MockSchemaProvider::default();
// Measurement exists
let (field_set, tag_set) = field_and_dimensions(&namespace, "cpu").unwrap().unwrap();
diff --git a/iox_query/src/plan/influxql/planner.rs b/iox_query/src/plan/influxql/planner.rs
index 9e6f599f0f..f77b61ca34 100644
--- a/iox_query/src/plan/influxql/planner.rs
+++ b/iox_query/src/plan/influxql/planner.rs
@@ -5,20 +5,18 @@ use crate::plan::influxql::util::{binary_operator_to_df_operator, Schemas};
use crate::plan::influxql::var_ref::{
column_type_to_var_ref_data_type, var_ref_data_type_to_data_type,
};
-use crate::{DataFusionError, IOxSessionContext, QueryNamespace};
+use crate::DataFusionError;
use arrow::datatypes::DataType;
use datafusion::common::{Result, ScalarValue, ToDFSchema};
-use datafusion::datasource::provider_as_source;
use datafusion::logical_expr::expr::Sort;
use datafusion::logical_expr::expr_rewriter::{normalize_col, ExprRewritable, ExprRewriter};
use datafusion::logical_expr::logical_plan::builder::project;
use datafusion::logical_expr::logical_plan::Analyze;
use datafusion::logical_expr::{
lit, BinaryExpr, BuiltinScalarFunction, Explain, Expr, ExprSchemable, LogicalPlan,
- LogicalPlanBuilder, Operator, PlanType, ToStringifiedPlan,
+ LogicalPlanBuilder, Operator, PlanType, TableSource, ToStringifiedPlan,
};
use datafusion::prelude::{binary_expr, Column};
-use datafusion::sql::TableReference;
use datafusion_util::AsExpr;
use influxdb_influxql_parser::common::OrderByClause;
use influxdb_influxql_parser::explain::{ExplainOption, ExplainStatement};
@@ -36,13 +34,32 @@ use influxdb_influxql_parser::{
};
use once_cell::sync::Lazy;
use query_functions::clean_non_meta_escapes;
-use schema::{InfluxColumnType, InfluxFieldType};
+use schema::{InfluxColumnType, InfluxFieldType, Schema};
use std::collections::HashSet;
+use std::fmt::Debug;
use std::iter;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::Arc;
+/// The `SchemaProvider` trait allows the InfluxQL query planner to obtain
+/// meta-data about tables referenced in InfluxQL statements.
+pub trait SchemaProvider {
+ /// Getter for a datasource
+ fn get_table_provider(&self, name: &str) -> Result<Arc<dyn TableSource>>;
+
+ /// The collection of tables for this schema.
+ fn table_names(&self) -> Vec<&'_ str>;
+
+ /// Test if a table with the specified `name` exists.
+ fn table_exists(&self, name: &str) -> bool {
+ self.table_names().contains(&name)
+ }
+
+ /// Get the schema for the specified `table`.
+ fn table_schema(&self, name: &str) -> Option<Schema>;
+}
+
/// Informs the planner which rules should be applied when transforming
/// an InfluxQL expression.
///
@@ -58,20 +75,18 @@ enum ExprScope {
Projection,
}
+#[allow(missing_debug_implementations)]
/// InfluxQL query planner
-#[allow(unused)]
-#[derive(Debug)]
pub struct InfluxQLToLogicalPlan<'a> {
- ctx: &'a IOxSessionContext,
- database: Arc<dyn QueryNamespace>,
+ s: &'a dyn SchemaProvider,
}
impl<'a> InfluxQLToLogicalPlan<'a> {
- pub fn new(ctx: &'a IOxSessionContext, database: Arc<dyn QueryNamespace>) -> Self {
- Self { ctx, database }
+ pub fn new(s: &'a dyn SchemaProvider) -> Self {
+ Self { s }
}
- pub async fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> {
+ pub fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> {
match statement {
Statement::CreateDatabase(_) => {
Err(DataFusionError::NotImplemented("CREATE DATABASE".into()))
@@ -80,8 +95,8 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
Statement::DropMeasurement(_) => {
Err(DataFusionError::NotImplemented("DROP MEASUREMENT".into()))
}
- Statement::Explain(explain) => self.explain_statement_to_plan(*explain).await,
- Statement::Select(select) => self.select_statement_to_plan(*select).await,
+ Statement::Explain(explain) => self.explain_statement_to_plan(*explain),
+ Statement::Select(select) => self.select_statement_to_plan(*select),
Statement::ShowDatabases(_) => {
Err(DataFusionError::NotImplemented("SHOW DATABASES".into()))
}
@@ -103,8 +118,8 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
}
}
- async fn explain_statement_to_plan(&self, explain: ExplainStatement) -> Result<LogicalPlan> {
- let plan = self.select_statement_to_plan(*explain.select).await?;
+ fn explain_statement_to_plan(&self, explain: ExplainStatement) -> Result<LogicalPlan> {
+ let plan = self.select_statement_to_plan(*explain.select)?;
let plan = Arc::new(plan);
let schema = LogicalPlan::explain_schema();
let schema = schema.to_dfschema_ref()?;
@@ -135,11 +150,11 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
}
/// Create a [`LogicalPlan`] from the specified InfluxQL `SELECT` statement.
- async fn select_statement_to_plan(&self, select: SelectStatement) -> Result<LogicalPlan> {
- let select = rewrite_statement(self.database.as_meta(), &select)?;
+ fn select_statement_to_plan(&self, select: SelectStatement) -> Result<LogicalPlan> {
+ let select = rewrite_statement(self.s, &select)?;
// Process FROM clause
- let plans = self.plan_from_tables(select.from).await?;
+ let plans = self.plan_from_tables(select.from)?;
// Only support a single measurement to begin with
let plan = match plans.len() {
@@ -509,13 +524,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Generate a list of logical plans for each of the tables references in the `FROM`
/// clause.
- async fn plan_from_tables(&self, from: FromMeasurementClause) -> Result<Vec<LogicalPlan>> {
+ fn plan_from_tables(&self, from: FromMeasurementClause) -> Result<Vec<LogicalPlan>> {
let mut plans = vec![];
for ms in from.iter() {
let plan = match ms {
MeasurementSelection::Name(qn) => match qn.name {
MeasurementName::Name(ref ident) => {
- self.create_table_ref(normalize_identifier(ident)).await
+ self.create_table_ref(normalize_identifier(ident))
}
// rewriter is expected to expand the regular expression
MeasurementName::Regex(_) => Err(DataFusionError::Internal(
@@ -533,11 +548,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Create a [LogicalPlan] that refers to the specified `table_name` or
/// an [LogicalPlan::EmptyRelation] if the table does not exist.
- async fn create_table_ref(&self, table_name: String) -> Result<LogicalPlan> {
- let table_ref: TableReference<'_> = table_name.as_str().into();
-
- if let Ok(provider) = self.ctx.inner().table_provider(table_ref).await {
- LogicalPlanBuilder::scan(&table_name, provider_as_source(provider), None)?.build()
+ fn create_table_ref(&self, table_name: String) -> Result<LogicalPlan> {
+ if let Ok(source) = self.s.get_table_provider(&table_name) {
+ LogicalPlanBuilder::scan(&table_name, source, None)?.build()
} else {
LogicalPlanBuilder::empty(false).build()
}
@@ -692,17 +705,18 @@ fn find_expr(cond: &ConditionalExpression) -> Result<&IQLExpr> {
#[cfg(test)]
mod test {
use super::*;
- use crate::exec::{ExecutionContextProvider, Executor};
+ use crate::exec::Executor;
use crate::plan::influxql::test_utils;
+ use crate::plan::influxql::test_utils::TestDatabaseAdapter;
use crate::test::{TestChunk, TestDatabase};
use influxdb_influxql_parser::parse_statements;
use insta::assert_snapshot;
- async fn plan(sql: &str) -> String {
+ fn plan(sql: &str) -> String {
let mut statements = parse_statements(sql).unwrap();
// index of columns in the above chunk: [bar, foo, i64_field, i64_field_2, time]
let executor = Arc::new(Executor::new_testing());
- let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
+ let test_db = TestDatabase::new(Arc::clone(&executor));
test_db.add_chunk(
"my_partition_key",
Arc::new(
@@ -747,10 +761,11 @@ mod test {
test_db.add_chunk("my_partition_key", Arc::clone(c));
});
- let ctx = test_db.new_query_context(None);
- let planner = InfluxQLToLogicalPlan::new(&ctx, test_db);
+ let sp = TestDatabaseAdapter::new(&test_db);
+
+ let planner = InfluxQLToLogicalPlan::new(&sp);
- match planner.statement_to_plan(statements.pop().unwrap()).await {
+ match planner.statement_to_plan(statements.pop().unwrap()) {
Ok(res) => res.display_indent_schema().to_string(),
Err(err) => err.to_string(),
}
@@ -759,17 +774,17 @@ mod test {
/// Verify the list of unsupported statements.
///
/// It is expected certain statements will be unsupported, indefinitely.
- #[tokio::test]
- async fn test_unsupported_statements() {
- assert_snapshot!(plan("CREATE DATABASE foo").await, @"This feature is not implemented: CREATE DATABASE");
- assert_snapshot!(plan("DELETE FROM foo").await, @"This feature is not implemented: DELETE");
- assert_snapshot!(plan("DROP MEASUREMENT foo").await, @"This feature is not implemented: DROP MEASUREMENT");
- assert_snapshot!(plan("SHOW DATABASES").await, @"This feature is not implemented: SHOW DATABASES");
- assert_snapshot!(plan("SHOW MEASUREMENTS").await, @"This feature is not implemented: SHOW MEASUREMENTS");
- assert_snapshot!(plan("SHOW RETENTION POLICIES").await, @"This feature is not implemented: SHOW RETENTION POLICIES");
- assert_snapshot!(plan("SHOW TAG KEYS").await, @"This feature is not implemented: SHOW TAG KEYS");
- assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar").await, @"This feature is not implemented: SHOW TAG VALUES");
- assert_snapshot!(plan("SHOW FIELD KEYS").await, @"This feature is not implemented: SHOW FIELD KEYS");
+ #[test]
+ fn test_unsupported_statements() {
+ assert_snapshot!(plan("CREATE DATABASE foo"), @"This feature is not implemented: CREATE DATABASE");
+ assert_snapshot!(plan("DELETE FROM foo"), @"This feature is not implemented: DELETE");
+ assert_snapshot!(plan("DROP MEASUREMENT foo"), @"This feature is not implemented: DROP MEASUREMENT");
+ assert_snapshot!(plan("SHOW DATABASES"), @"This feature is not implemented: SHOW DATABASES");
+ assert_snapshot!(plan("SHOW MEASUREMENTS"), @"This feature is not implemented: SHOW MEASUREMENTS");
+ assert_snapshot!(plan("SHOW RETENTION POLICIES"), @"This feature is not implemented: SHOW RETENTION POLICIES");
+ assert_snapshot!(plan("SHOW TAG KEYS"), @"This feature is not implemented: SHOW TAG KEYS");
+ assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar"), @"This feature is not implemented: SHOW TAG VALUES");
+ assert_snapshot!(plan("SHOW FIELD KEYS"), @"This feature is not implemented: SHOW FIELD KEYS");
}
/// Tests to validate InfluxQL `SELECT` statements, where the projections do not matter,
@@ -777,10 +792,10 @@ mod test {
mod select {
use super::*;
- #[tokio::test]
- async fn test_time_range_in_where() {
+ #[test]
+ fn test_time_range_in_where() {
assert_snapshot!(
- plan("SELECT foo, f64_field FROM data where time > now() - 10s").await, @r###"
+ plan("SELECT foo, f64_field FROM data where time > now() - 10s"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: data.time > now() - IntervalMonthDayNano("10000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -788,7 +803,7 @@ mod test {
"###
);
assert_snapshot!(
- plan("SELECT foo, f64_field FROM data where time > '2004-04-09T02:33:45Z'").await, @r###"
+ plan("SELECT foo, f64_field FROM data where time > '2004-04-09T02:33:45Z'"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: data.time > TimestampNanosecond(1081478025000000000, None) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -796,12 +811,12 @@ mod test {
"###
);
assert_snapshot!(
- plan("SELECT foo, f64_field FROM data where time > '2004-04-09T'").await, @r###"Error during planning: invalid expression "'2004-04-09T'": '2004-04-09T' is not a valid timestamp"###
+ plan("SELECT foo, f64_field FROM data where time > '2004-04-09T'"), @r###"Error during planning: invalid expression "'2004-04-09T'": '2004-04-09T' is not a valid timestamp"###
);
// time on the right-hand side
assert_snapshot!(
- plan("SELECT foo, f64_field FROM data where now() - 10s < time").await, @r###"
+ plan("SELECT foo, f64_field FROM data where now() - 10s < time"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: now() - IntervalMonthDayNano("10000000000") < data.time [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -811,7 +826,7 @@ mod test {
// Regular expression equality tests
- assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo =~ /f/").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo =~ /f/"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: CAST(data.foo AS Utf8) ~ Utf8("f") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -819,7 +834,7 @@ mod test {
"###);
// regular expression for a numeric field is rewritten to `false`
- assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field =~ /f/").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field =~ /f/"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -828,7 +843,7 @@ mod test {
// regular expression for a non-existent field is rewritten to `false`
assert_snapshot!(
- plan("SELECT foo, f64_field FROM data where non_existent =~ /f/").await, @r###"
+ plan("SELECT foo, f64_field FROM data where non_existent =~ /f/"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -838,7 +853,7 @@ mod test {
// Regular expression inequality tests
- assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo !~ /f/").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo !~ /f/"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: CAST(data.foo AS Utf8) !~ Utf8("f") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -846,7 +861,7 @@ mod test {
"###);
// regular expression for a numeric field is rewritten to `false`
- assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field !~ /f/").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field !~ /f/"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -855,7 +870,7 @@ mod test {
// regular expression for a non-existent field is rewritten to `false`
assert_snapshot!(
- plan("SELECT foo, f64_field FROM data where non_existent !~ /f/").await, @r###"
+ plan("SELECT foo, f64_field FROM data where non_existent !~ /f/"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -864,78 +879,78 @@ mod test {
);
}
- #[tokio::test]
- async fn test_column_matching_rules() {
+ #[test]
+ fn test_column_matching_rules() {
// Cast between numeric types
- assert_snapshot!(plan("SELECT f64_field::integer FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::integer FROM data"), @r###"
Projection: data.time, CAST(data.f64_field AS Int64) AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Int64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT i64_field::float FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT i64_field::float FROM data"), @r###"
Projection: data.time, CAST(data.i64_field AS Float64) AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
// use field selector
- assert_snapshot!(plan("SELECT bool_field::field FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT bool_field::field FROM data"), @r###"
Projection: data.time, data.bool_field AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Boolean;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
        // invalid column reference
- assert_snapshot!(plan("SELECT not_exists::tag FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT not_exists::tag FROM data"), @r###"
Projection: data.time, NULL AS not_exists [time:Timestamp(Nanosecond, None), not_exists:Null;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT not_exists::field FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT not_exists::field FROM data"), @r###"
Projection: data.time, NULL AS not_exists [time:Timestamp(Nanosecond, None), not_exists:Null;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
// Returns NULL for invalid casts
- assert_snapshot!(plan("SELECT f64_field::string FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::string FROM data"), @r###"
Projection: data.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT f64_field::boolean FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::boolean FROM data"), @r###"
Projection: data.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT str_field::boolean FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT str_field::boolean FROM data"), @r###"
Projection: data.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
}
- #[tokio::test]
- async fn test_explain() {
- assert_snapshot!(plan("EXPLAIN SELECT foo, f64_field FROM data").await, @r###"
+ #[test]
+ fn test_explain() {
+ assert_snapshot!(plan("EXPLAIN SELECT foo, f64_field FROM data"), @r###"
Explain [plan_type:Utf8, plan:Utf8]
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("EXPLAIN VERBOSE SELECT foo, f64_field FROM data").await, @r###"
+ assert_snapshot!(plan("EXPLAIN VERBOSE SELECT foo, f64_field FROM data"), @r###"
Explain [plan_type:Utf8, plan:Utf8]
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("EXPLAIN ANALYZE SELECT foo, f64_field FROM data").await, @r###"
+ assert_snapshot!(plan("EXPLAIN ANALYZE SELECT foo, f64_field FROM data"), @r###"
Analyze [plan_type:Utf8, plan:Utf8]
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("EXPLAIN ANALYZE VERBOSE SELECT foo, f64_field FROM data").await, @r###"
+ assert_snapshot!(plan("EXPLAIN ANALYZE VERBOSE SELECT foo, f64_field FROM data"), @r###"
Analyze [plan_type:Utf8, plan:Utf8]
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -943,138 +958,138 @@ mod test {
"###);
}
- #[tokio::test]
- async fn test_select_cast_postfix_operator() {
+ #[test]
+ fn test_select_cast_postfix_operator() {
// Float casting
- assert_snapshot!(plan("SELECT f64_field::float FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::float FROM all_types"), @r###"
Projection: all_types.time, all_types.f64_field AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Float64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT f64_field::unsigned FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::unsigned FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.f64_field AS UInt64) AS f64_field [time:Timestamp(Nanosecond, None), f64_field:UInt64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT f64_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::integer FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.f64_field AS Int64) AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Int64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT f64_field::string FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::string FROM all_types"), @r###"
Projection: all_types.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT f64_field::boolean FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::boolean FROM all_types"), @r###"
Projection: all_types.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
// Integer casting
- assert_snapshot!(plan("SELECT i64_field::float FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT i64_field::float FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.i64_field AS Float64) AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Float64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT i64_field::unsigned FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT i64_field::unsigned FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.i64_field AS UInt64) AS i64_field [time:Timestamp(Nanosecond, None), i64_field:UInt64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT i64_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT i64_field::integer FROM all_types"), @r###"
Projection: all_types.time, all_types.i64_field AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Int64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT i64_field::string FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT i64_field::string FROM all_types"), @r###"
Projection: all_types.time, NULL AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT i64_field::boolean FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT i64_field::boolean FROM all_types"), @r###"
Projection: all_types.time, NULL AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
// Unsigned casting
- assert_snapshot!(plan("SELECT u64_field::float FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT u64_field::float FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.u64_field AS Float64) AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Float64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT u64_field::unsigned FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT u64_field::unsigned FROM all_types"), @r###"
Projection: all_types.time, all_types.u64_field AS u64_field [time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT u64_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT u64_field::integer FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.u64_field AS Int64) AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Int64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT u64_field::string FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT u64_field::string FROM all_types"), @r###"
Projection: all_types.time, NULL AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT u64_field::boolean FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT u64_field::boolean FROM all_types"), @r###"
Projection: all_types.time, NULL AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
// String casting
- assert_snapshot!(plan("SELECT str_field::float FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT str_field::float FROM all_types"), @r###"
Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT str_field::unsigned FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT str_field::unsigned FROM all_types"), @r###"
Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT str_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT str_field::integer FROM all_types"), @r###"
Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT str_field::string FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT str_field::string FROM all_types"), @r###"
Projection: all_types.time, all_types.str_field AS str_field [time:Timestamp(Nanosecond, None), str_field:Utf8;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT str_field::boolean FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT str_field::boolean FROM all_types"), @r###"
Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
// Boolean casting
- assert_snapshot!(plan("SELECT bool_field::float FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT bool_field::float FROM all_types"), @r###"
Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT bool_field::unsigned FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT bool_field::unsigned FROM all_types"), @r###"
Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT bool_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT bool_field::integer FROM all_types"), @r###"
Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT bool_field::string FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT bool_field::string FROM all_types"), @r###"
Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT bool_field::boolean FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT bool_field::boolean FROM all_types"), @r###"
Projection: all_types.time, all_types.bool_field AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Boolean;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
@@ -1082,13 +1097,13 @@ mod test {
// Validate various projection expressions with casts
- assert_snapshot!(plan("SELECT f64_field::integer + i64_field + u64_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::integer + i64_field + u64_field::integer FROM all_types"), @r###"
Projection: all_types.time, CAST(all_types.f64_field AS Int64) + all_types.i64_field + CAST(all_types.u64_field AS Int64) AS f64_field_i64_field_u64_field [time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
"###);
- assert_snapshot!(plan("SELECT f64_field::integer + i64_field + str_field::integer FROM all_types").await, @r###"
+ assert_snapshot!(plan("SELECT f64_field::integer + i64_field + str_field::integer FROM all_types"), @r###"
Projection: all_types.time, NULL AS f64_field_i64_field_str_field [time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N]
Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
@@ -1102,44 +1117,44 @@ mod test {
use super::*;
/// Select data from a single measurement
- #[tokio::test]
- async fn test_single_measurement() {
- assert_snapshot!(plan("SELECT f64_field FROM data").await, @r###"
+ #[test]
+ fn test_single_measurement() {
+ assert_snapshot!(plan("SELECT f64_field FROM data"), @r###"
Projection: data.time, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT time, f64_field FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT time, f64_field FROM data"), @r###"
Projection: data.time AS time, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT time as timestamp, f64_field FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT time as timestamp, f64_field FROM data"), @r###"
Projection: data.time AS timestamp, data.f64_field AS f64_field [timestamp:Timestamp(Nanosecond, None), f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT foo, f64_field FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field FROM data"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT foo, f64_field, i64_field FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field, i64_field FROM data"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field AS f64_field, data.i64_field AS i64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT /^f/ FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT /^f/ FROM data"), @r###"
Projection: data.time, data.f64_field AS f64_field, data.foo AS foo [time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT * FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT * FROM data"), @r###"
Projection: data.time, data.TIME AS TIME, data.bar AS bar, data.bool_field AS bool_field, data.f64_field AS f64_field, data.foo AS foo, data.i64_field AS i64_field, data.mixedCase AS mixedCase, data.str_field AS str_field, data.with space AS with space [time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT TIME FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT TIME FROM data"), @r###"
Projection: data.time, data.TIME AS TIME [time:Timestamp(Nanosecond, None), TIME:Boolean;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -1147,24 +1162,24 @@ mod test {
}
/// Arithmetic expressions in the projection list
- #[tokio::test]
- async fn test_simple_arithmetic_in_projection() {
- assert_snapshot!(plan("SELECT foo, f64_field + f64_field FROM data").await, @r###"
+ #[test]
+ fn test_simple_arithmetic_in_projection() {
+ assert_snapshot!(plan("SELECT foo, f64_field + f64_field FROM data"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field + data.f64_field AS f64_field_f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT foo, sin(f64_field) FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT foo, sin(f64_field) FROM data"), @r###"
Projection: data.time, data.foo AS foo, sin(data.f64_field) AS sin [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT foo, atan2(f64_field, 2) FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT foo, atan2(f64_field, 2) FROM data"), @r###"
Projection: data.time, data.foo AS foo, atan2(data.f64_field, Int64(2)) AS atan2 [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
- assert_snapshot!(plan("SELECT foo, f64_field + 0.5 FROM data").await, @r###"
+ assert_snapshot!(plan("SELECT foo, f64_field + 0.5 FROM data"), @r###"
Projection: data.time, data.foo AS foo, data.f64_field + Float64(0.5) AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N]
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
@@ -1278,10 +1293,10 @@ mod test {
/// Succeeds and returns null values for the expression
/// **Actual:**
/// Error during planning: 'Float64 + Utf8' can't be evaluated because there isn't a common type to coerce the types to
- #[tokio::test]
+ #[test]
#[ignore]
- async fn test_select_coercion_from_str() {
- assert_snapshot!(plan("SELECT f64_field + str_field::float FROM data").await, @"");
+ fn test_select_coercion_from_str() {
+ assert_snapshot!(plan("SELECT f64_field + str_field::float FROM data"), @"");
}
/// **Issue:**
@@ -1290,14 +1305,14 @@ mod test {
/// Succeeds and plans the query, returning null values for unknown columns
/// **Actual:**
/// Schema error: No field named 'TIME'. Valid fields are 'data'.'bar', 'data'.'bool_field', 'data'.'f64_field', 'data'.'foo', 'data'.'i64_field', 'data'.'mixedCase', 'data'.'str_field', 'data'.'time', 'data'.'with space'.
- #[tokio::test]
+ #[test]
#[ignore]
- async fn test_select_case_sensitivity() {
+ fn test_select_case_sensitivity() {
// should return no results
- assert_snapshot!(plan("SELECT TIME, f64_Field FROM data").await);
+ assert_snapshot!(plan("SELECT TIME, f64_Field FROM data"));
// should bind to time and f64_field, and i64_Field should return NULL values
- assert_snapshot!(plan("SELECT time, f64_field, i64_Field FROM data").await);
+ assert_snapshot!(plan("SELECT time, f64_field, i64_Field FROM data"));
}
}
}
diff --git a/iox_query/src/plan/influxql/rewriter.rs b/iox_query/src/plan/influxql/rewriter.rs
index 335fccc676..f1072c1e0c 100644
--- a/iox_query/src/plan/influxql/rewriter.rs
+++ b/iox_query/src/plan/influxql/rewriter.rs
@@ -3,6 +3,7 @@
use crate::plan::influxql::expr_type_evaluator::evaluate_type;
use crate::plan::influxql::field::field_name;
use crate::plan::influxql::field_mapper::{field_and_dimensions, FieldTypeMap, TagSet};
+use crate::plan::influxql::{util, SchemaProvider};
use datafusion::common::{DataFusionError, Result};
use influxdb_influxql_parser::common::{MeasurementName, QualifiedMeasurementName};
use influxdb_influxql_parser::expression::walk::{walk_expr, walk_expr_mut};
@@ -13,23 +14,13 @@ use influxdb_influxql_parser::select::{
Dimension, Field, FieldList, FromMeasurementClause, GroupByClause, MeasurementSelection,
SelectStatement,
};
-use influxdb_influxql_parser::string::Regex;
use itertools::Itertools;
-use predicate::rpc_predicate::QueryNamespaceMeta;
-use query_functions::clean_non_meta_escapes;
use std::borrow::Borrow;
use std::collections::{HashMap, HashSet};
use std::ops::{ControlFlow, Deref};
-fn parse_regex(re: &Regex) -> Result<regex::Regex> {
- let pattern = clean_non_meta_escapes(re.as_str());
- regex::Regex::new(&pattern).map_err(|e| {
- DataFusionError::External(format!("invalid regular expression '{re}': {e}").into())
- })
-}
-
/// Recursively expand the `from` clause of `stmt` and any subqueries.
-fn rewrite_from(namespace: &dyn QueryNamespaceMeta, stmt: &mut SelectStatement) -> Result<()> {
+fn rewrite_from(s: &dyn SchemaProvider, stmt: &mut SelectStatement) -> Result<()> {
let mut new_from = Vec::new();
for ms in stmt.from.iter() {
match ms {
@@ -38,7 +29,7 @@ fn rewrite_from(namespace: &dyn QueryNamespaceMeta, stmt: &mut SelectStatement)
name: MeasurementName::Name(name),
..
} => {
- if namespace.table_schema(name).is_some() {
+ if s.table_exists(name) {
new_from.push(ms.clone())
}
}
@@ -46,11 +37,10 @@ fn rewrite_from(namespace: &dyn QueryNamespaceMeta, stmt: &mut SelectStatement)
name: MeasurementName::Regex(re),
..
} => {
- let re = parse_regex(re)?;
- namespace
- .table_names()
+ let re = util::parse_regex(re)?;
+ s.table_names()
.into_iter()
- .filter(|table| re.is_match(table.as_str()))
+ .filter(|table| re.is_match(table))
.for_each(|table| {
new_from.push(MeasurementSelection::Name(QualifiedMeasurementName {
database: None,
@@ -62,7 +52,7 @@ fn rewrite_from(namespace: &dyn QueryNamespaceMeta, stmt: &mut SelectStatement)
},
MeasurementSelection::Subquery(q) => {
let mut q = *q.clone();
- rewrite_from(namespace, &mut q)?;
+ rewrite_from(s, &mut q)?;
new_from.push(MeasurementSelection::Subquery(Box::new(q)))
}
}
@@ -73,7 +63,7 @@ fn rewrite_from(namespace: &dyn QueryNamespaceMeta, stmt: &mut SelectStatement)
/// Determine the merged fields and tags of the `FROM` clause.
fn from_field_and_dimensions(
- namespace: &dyn QueryNamespaceMeta,
+ s: &dyn SchemaProvider,
from: &FromMeasurementClause,
) -> Result<(FieldTypeMap, TagSet)> {
let mut fs = FieldTypeMap::new();
@@ -85,7 +75,7 @@ fn from_field_and_dimensions(
name: MeasurementName::Name(name),
..
}) => {
- let (field_set, tag_set) = match field_and_dimensions(namespace, name.as_str())? {
+ let (field_set, tag_set) = match field_and_dimensions(s, name.as_str())? {
Some(res) => res,
None => continue,
};
@@ -108,7 +98,7 @@ fn from_field_and_dimensions(
}
MeasurementSelection::Subquery(select) => {
for f in select.fields.iter() {
- let dt = match evaluate_type(namespace, &f.expr, &select.from)? {
+ let dt = match evaluate_type(s, &f.expr, &select.from)? {
Some(dt) => dt,
None => continue,
};
@@ -195,14 +185,11 @@ fn has_wildcards(stmt: &SelectStatement) -> (bool, bool) {
/// underlying schema.
///
/// Derived from [Go implementation](https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L1185).
-fn rewrite_field_list(
- namespace: &dyn QueryNamespaceMeta,
- stmt: &mut SelectStatement,
-) -> Result<()> {
+fn rewrite_field_list(s: &dyn SchemaProvider, stmt: &mut SelectStatement) -> Result<()> {
// Iterate through the `FROM` clause and rewrite any subqueries first.
for ms in stmt.from.iter_mut() {
if let MeasurementSelection::Subquery(subquery) = ms {
- rewrite_field_list(namespace, subquery)?;
+ rewrite_field_list(s, subquery)?;
}
}
@@ -211,7 +198,7 @@ fn rewrite_field_list(
if let ControlFlow::Break(e) = stmt.fields.iter_mut().try_for_each(|f| {
walk_expr_mut::<DataFusionError>(&mut f.expr, &mut |e| {
if matches!(e, Expr::VarRef { .. }) {
- let new_type = match evaluate_type(namespace, e.borrow(), &stmt.from) {
+ let new_type = match evaluate_type(s, e.borrow(), &stmt.from) {
Err(e) => ControlFlow::Break(e)?,
Ok(v) => v,
};
@@ -231,7 +218,7 @@ fn rewrite_field_list(
return Ok(());
}
- let (field_set, mut tag_set) = from_field_and_dimensions(namespace, &stmt.from)?;
+ let (field_set, mut tag_set) = from_field_and_dimensions(s, &stmt.from)?;
if !has_group_by_wildcard {
if let Some(group_by) = &stmt.group_by {
@@ -298,7 +285,7 @@ fn rewrite_field_list(
}
Expr::Literal(Literal::Regex(re)) => {
- let re = parse_regex(re)?;
+ let re = util::parse_regex(re)?;
fields
.iter()
.filter(|v| re.is_match(v.name.as_str()))
@@ -369,7 +356,7 @@ fn rewrite_field_list(
.for_each(add_field);
}
Some(Expr::Literal(Literal::Regex(re))) => {
- let re = parse_regex(re)?;
+ let re = util::parse_regex(re)?;
fields
.iter()
.filter(|v| {
@@ -434,7 +421,7 @@ fn rewrite_field_list(
group_by_tags.iter().for_each(add_dim);
}
Dimension::Regex(re) => {
- let re = parse_regex(re)?;
+ let re = util::parse_regex(re)?;
group_by_tags
.iter()
@@ -490,12 +477,12 @@ fn rewrite_field_list_aliases(field_list: &mut FieldList) -> Result<()> {
/// Recursively rewrite the specified [`SelectStatement`], expanding any wildcards or regular expressions
/// found in the projection list, `FROM` clause or `GROUP BY` clause.
pub(crate) fn rewrite_statement(
- namespace: &dyn QueryNamespaceMeta,
+ s: &dyn SchemaProvider,
q: &SelectStatement,
) -> Result<SelectStatement> {
let mut stmt = q.clone();
- rewrite_from(namespace, &mut stmt)?;
- rewrite_field_list(namespace, &mut stmt)?;
+ rewrite_from(s, &mut stmt)?;
+ rewrite_field_list(s, &mut stmt)?;
rewrite_field_list_aliases(&mut stmt.fields)?;
Ok(stmt)
@@ -504,12 +491,12 @@ pub(crate) fn rewrite_statement(
#[cfg(test)]
mod test {
use crate::plan::influxql::rewriter::{has_wildcards, rewrite_statement};
- use crate::plan::influxql::test_utils::{parse_select, MockNamespace};
+ use crate::plan::influxql::test_utils::{parse_select, MockSchemaProvider};
use test_helpers::assert_contains;
#[test]
fn test_rewrite_statement() {
- let namespace = MockNamespace::default();
+ let namespace = MockSchemaProvider::default();
// Exact, match
let stmt = parse_select("SELECT usage_user FROM cpu");
let stmt = rewrite_statement(&namespace, &stmt).unwrap();
diff --git a/iox_query/src/plan/influxql/test_utils.rs b/iox_query/src/plan/influxql/test_utils.rs
index 7ebbd77b32..f322dc799c 100644
--- a/iox_query/src/plan/influxql/test_utils.rs
+++ b/iox_query/src/plan/influxql/test_utils.rs
@@ -1,13 +1,19 @@
//! APIs for testing.
#![cfg(test)]
-use crate::test::TestChunk;
+use crate::plan::influxql::SchemaProvider;
+use crate::test::{TestChunk, TestDatabase};
use crate::QueryChunkMeta;
+use datafusion::common::DataFusionError;
+use datafusion::datasource::empty::EmptyTable;
+use datafusion::datasource::provider_as_source;
+use datafusion::logical_expr::TableSource;
use influxdb_influxql_parser::parse_statements;
use influxdb_influxql_parser::select::{Field, SelectStatement};
use influxdb_influxql_parser::statement::Statement;
use predicate::rpc_predicate::QueryNamespaceMeta;
use schema::Schema;
+use std::collections::HashMap;
use std::sync::Arc;
/// Returns the first `Field` of the `SELECT` statement.
@@ -142,27 +148,68 @@ pub(crate) mod database {
}
}
-pub(crate) struct MockNamespace {
+pub(crate) struct MockSchemaProvider {
chunks: Vec<Arc<TestChunk>>,
}
-impl Default for MockNamespace {
+impl Default for MockSchemaProvider {
fn default() -> Self {
let chunks = database::chunks();
Self { chunks }
}
}
-impl QueryNamespaceMeta for MockNamespace {
- fn table_names(&self) -> Vec<String> {
- self.chunks
- .iter()
- .map(|x| x.table_name().to_string())
- .collect()
+impl SchemaProvider for MockSchemaProvider {
+ fn get_table_provider(
+ &self,
+ _name: &str,
+ ) -> crate::exec::context::Result<Arc<dyn TableSource>> {
+ unimplemented!()
}
- fn table_schema(&self, table_name: &str) -> Option<Schema> {
- let c = self.chunks.iter().find(|x| x.table_name() == table_name)?;
+ fn table_names(&self) -> Vec<&'_ str> {
+ self.chunks.iter().map(|x| x.table_name()).collect()
+ }
+
+ fn table_schema(&self, name: &str) -> Option<Schema> {
+ let c = self.chunks.iter().find(|x| x.table_name() == name)?;
Some(c.schema().clone())
}
}
+
+pub(crate) struct TestDatabaseAdapter {
+ tables: HashMap<String, (Arc<dyn TableSource>, Schema)>,
+}
+
+impl SchemaProvider for TestDatabaseAdapter {
+ fn get_table_provider(&self, name: &str) -> crate::exec::context::Result<Arc<dyn TableSource>> {
+ self.tables
+ .get(name)
+ .map(|(t, _)| Arc::clone(t))
+ .ok_or_else(|| DataFusionError::Plan(format!("measurement does not exist: {name}")))
+ }
+
+ fn table_names(&self) -> Vec<&'_ str> {
+ self.tables.keys().map(|k| k.as_str()).collect::<Vec<_>>()
+ }
+
+ fn table_schema(&self, name: &str) -> Option<Schema> {
+ self.tables.get(name).map(|(_, s)| s.clone())
+ }
+}
+
+impl TestDatabaseAdapter {
+ pub(crate) fn new(db: &TestDatabase) -> Self {
+ let table_names = db.table_names();
+ let mut res = Self {
+ tables: HashMap::with_capacity(table_names.len()),
+ };
+ for table in table_names {
+ let schema = db.table_schema(&table).unwrap();
+ let s = Arc::new(EmptyTable::new(schema.as_arrow()));
+ res.tables.insert(table, (provider_as_source(s), schema));
+ }
+
+ res
+ }
+}
diff --git a/iox_query/src/plan/influxql/util.rs b/iox_query/src/plan/influxql/util.rs
index d27e44b547..d23e84f989 100644
--- a/iox_query/src/plan/influxql/util.rs
+++ b/iox_query/src/plan/influxql/util.rs
@@ -1,6 +1,8 @@
use datafusion::common::{DFSchema, DFSchemaRef, DataFusionError, Result};
use datafusion::logical_expr::Operator;
use influxdb_influxql_parser::expression::BinaryOperator;
+use influxdb_influxql_parser::string::Regex;
+use query_functions::clean_non_meta_escapes;
use schema::Schema;
use std::sync::Arc;
@@ -41,3 +43,11 @@ impl Schemas {
})
}
}
+
+/// Sanitize an InfluxQL regular expression and create a compiled [`regex::Regex`].
+pub fn parse_regex(re: &Regex) -> Result<regex::Regex> {
+ let pattern = clean_non_meta_escapes(re.as_str());
+ regex::Regex::new(&pattern).map_err(|e| {
+ DataFusionError::External(format!("invalid regular expression '{re}': {e}").into())
+ })
+}
diff --git a/service_common/src/planner.rs b/service_common/src/planner.rs
index 35041e0838..8044d4a308 100644
--- a/service_common/src/planner.rs
+++ b/service_common/src/planner.rs
@@ -50,7 +50,6 @@ impl Planner {
/// DataFusion physical execution plan.
pub async fn influxql(
&self,
- database: Arc<dyn QueryNamespace>,
query: impl Into<String> + Send,
) -> Result<Arc<dyn ExecutionPlan>> {
let planner = InfluxQLQueryPlanner::new();
@@ -58,7 +57,7 @@ impl Planner {
let ctx = self.ctx.child_ctx("planner influxql");
self.ctx
- .run(async move { planner.query(database, &query, &ctx).await })
+ .run(async move { planner.query(&query, &ctx).await })
.await
}
diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs
index 17b525b43a..0d0e5a2079 100644
--- a/service_grpc_flight/src/lib.rs
+++ b/service_grpc_flight/src/lib.rs
@@ -374,7 +374,7 @@ where
RunQuery::InfluxQL(sql_query) => {
let token = db.record_query(&ctx, "influxql", Box::new(sql_query.clone()));
let plan = Planner::new(&ctx)
- .influxql(db, sql_query)
+ .influxql(sql_query)
.await
.context(PlanningSnafu)?;
(token, plan)
|
40f1937e63c11d04442a55621d2d716b8adae3d0
|
Dom Dwyer
|
2022-10-18 22:13:13
|
write buffer seeking tests
|
Asserts write buffer seeking behaviour, including:
* Seeking past already persisted data correctly
* Skipping to next available op in non-contiguous offset stream
* Skipping to next available op for dropped ops due to retention
* Panics when seeking beyond available data (into the future)
Removes a pair of tests that covered some of the above due to their
tight coupling with ingester internals.
| null |
test: write buffer seeking tests
Asserts write buffer seeking behaviour, including:
* Seeking past already persisted data correctly
* Skipping to next available op in non-contiguous offset stream
* Skipping to next available op for dropped ops due to retention
* Panics when seeking beyond available data (into the future)
Removes a pair of tests that covered some of the above due to their
tight coupling with ingester internals.
|
diff --git a/ingester/src/handler.rs b/ingester/src/handler.rs
index 27d378c09d..d5c2def7e9 100644
--- a/ingester/src/handler.rs
+++ b/ingester/src/handler.rs
@@ -445,7 +445,6 @@ mod tests {
use write_buffer::mock::{MockBufferForReading, MockBufferSharedState};
use super::*;
- use crate::data::{partition::SnapshotBatch, table::TableName};
#[tokio::test]
async fn test_shutdown() {
@@ -583,88 +582,6 @@ mod tests {
(ingester, shard, namespace)
}
- async fn verify_ingester_buffer_has_data(
- ingester: IngestHandlerImpl,
- shard: Shard,
- namespace: Namespace,
- custom_batch_verification: impl Fn(&SnapshotBatch) + Send,
- ) {
- // give the writes some time to go through the buffer. Exit once we've verified there's
- // data in there
- tokio::time::timeout(Duration::from_secs(1), async move {
- let ns_name = namespace.name.into();
- let table_name = TableName::from("cpu");
- loop {
- let mut has_measurement = false;
-
- if let Some(data) = ingester.data.shard(shard.id) {
- if let Some(data) = data.namespace(&ns_name) {
- // verify there's data in the buffer
- if let Some((b, _)) = data.snapshot(&table_name, &"1970-01-01".into()).await
- {
- if let Some(b) = b.first() {
- custom_batch_verification(b);
-
- if b.data.num_rows() == 1 {
- has_measurement = true;
- }
- }
- }
- }
- }
-
- if has_measurement {
- break;
- }
-
- tokio::time::sleep(Duration::from_millis(200)).await;
- }
- })
- .await
- .expect("timeout");
- }
-
- #[tokio::test]
- async fn seeks_on_initialization() {
- let ingest_ts1 = Time::from_timestamp_millis(42);
- let ingest_ts2 = Time::from_timestamp_millis(1337);
- let write_operations = vec![
- DmlWrite::new(
- "foo",
- lines_to_batches("cpu bar=2 20", 0).unwrap(),
- Some("1970-01-01".into()),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(0), SequenceNumber::new(1)),
- ingest_ts1,
- None,
- 150,
- ),
- ),
- DmlWrite::new(
- "foo",
- lines_to_batches("cpu bar=2 30", 0).unwrap(),
- Some("1970-01-01".into()),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(0), SequenceNumber::new(2)),
- ingest_ts2,
- None,
- 150,
- ),
- ),
- ];
-
- let (ingester, shard, namespace) = ingester_test_setup(write_operations, 2, false).await;
-
- verify_ingester_buffer_has_data(ingester, shard, namespace, |first_batch| {
- if first_batch.min_sequence_number == SequenceNumber::new(1) {
- panic!(
- "initialization did a seek to the beginning rather than the min_unpersisted"
- );
- }
- })
- .await;
- }
-
#[tokio::test]
#[should_panic(expected = "JoinError::Panic")]
async fn sequence_number_no_longer_exists() {
@@ -741,38 +658,6 @@ mod tests {
.unwrap();
}
- #[tokio::test]
- async fn skip_to_oldest_available() {
- maybe_start_logging();
-
- let ingest_ts1 = Time::from_timestamp_millis(42);
- let write_operations = vec![DmlWrite::new(
- "foo",
- lines_to_batches("cpu bar=2 20", 0).unwrap(),
- Some("1970-01-01".into()),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(0), SequenceNumber::new(10)),
- ingest_ts1,
- None,
- 150,
- ),
- )];
-
- // Set the min unpersisted to something bigger than the write's sequence number to
- // cause an UnknownSequenceNumber error. Skip to oldest available = true, so ingester
- // should find data
- let (ingester, shard, namespace) = ingester_test_setup(write_operations, 1, true).await;
-
- verify_ingester_buffer_has_data(ingester, shard, namespace, |first_batch| {
- assert_eq!(
- first_batch.min_sequence_number,
- SequenceNumber::new(10),
- "re-initialization didn't seek to the beginning",
- );
- })
- .await;
- }
-
#[tokio::test]
async fn limits_concurrent_queries() {
let (mut ingester, _, _) = ingester_test_setup(vec![], 0, true).await;
diff --git a/ingester/tests/write.rs b/ingester/tests/write.rs
index 243cb75ea2..cbb6dd2908 100644
--- a/ingester/tests/write.rs
+++ b/ingester/tests/write.rs
@@ -1,8 +1,9 @@
mod common;
use arrow_util::assert_batches_sorted_eq;
+use assert_matches::assert_matches;
pub use common::*;
-use data_types::PartitionKey;
+use data_types::{Partition, PartitionKey, SequenceNumber};
use generated_types::ingester::IngesterQueryRequest;
use iox_time::{SystemProvider, TimeProvider};
use metric::{DurationHistogram, U64Counter, U64Gauge};
@@ -117,3 +118,304 @@ async fn test_write_query() {
let now = SystemProvider::new().now();
assert!(metric < now.timestamp_nanos() as _);
}
+
+// Ensure an ingester correctly seeks to the offset stored in the catalog at
+// startup, skipping any empty offsets.
+#[tokio::test]
+async fn test_seek_on_init() {
+ let mut ctx = TestContext::new().await;
+
+ // Place some writes into the write buffer.
+
+ let partition_key = PartitionKey::from("1970-01-01");
+
+ ctx.ensure_namespace("test_namespace").await;
+ ctx.write_lp(
+ "test_namespace",
+ "bananas greatness=\"unbounded\" 10",
+ partition_key.clone(),
+ 0,
+ )
+ .await;
+
+ // A subsequent write with a non-contiguous sequence number to a different
+ // table.
+ //
+ // Resuming will be configured against an offset in the middle of the two
+ // ranges.
+ let w2 = ctx
+ .write_lp(
+ "test_namespace",
+ "bananas greatness=\"amazing\",platanos=42 20",
+ partition_key.clone(),
+ 7,
+ )
+ .await;
+
+ // Wait for the writes to be processed.
+ ctx.wait_for_readable(w2).await;
+
+ // Assert the data in memory.
+ let data = ctx
+ .query(IngesterQueryRequest {
+ namespace: "test_namespace".to_string(),
+ table: "bananas".to_string(),
+ columns: vec![],
+ predicate: None,
+ })
+ .await
+ .expect("query should succeed")
+ .into_record_batches()
+ .await;
+
+ let expected = vec![
+ "+-----------+----------+--------------------------------+",
+ "| greatness | platanos | time |",
+ "+-----------+----------+--------------------------------+",
+ "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |",
+ "| unbounded | | 1970-01-01T00:00:00.000000010Z |",
+ "+-----------+----------+--------------------------------+",
+ ];
+ assert_batches_sorted_eq!(&expected, &data);
+
+ // Update the catalog state, causing the next boot of the ingester to seek
+ // past the first write, but before the second write.
+ ctx.catalog()
+ .repositories()
+ .await
+ .shards()
+ .update_min_unpersisted_sequence_number(ctx.shard_id(), SequenceNumber::new(3))
+ .await
+ .expect("failed to update persisted marker");
+
+ // Restart the ingester.
+ ctx.restart().await;
+
+ // Wait for the second write to become readable again.
+ ctx.wait_for_readable(w2).await;
+
+ // Assert the data in memory now contains only w2.
+ let data = ctx
+ .query(IngesterQueryRequest {
+ namespace: "test_namespace".to_string(),
+ table: "bananas".to_string(),
+ columns: vec![],
+ predicate: None,
+ })
+ .await
+ .expect("query should succeed")
+ .into_record_batches()
+ .await;
+
+ let expected = vec![
+ "+-----------+----------+--------------------------------+",
+ "| greatness | platanos | time |",
+ "+-----------+----------+--------------------------------+",
+ "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |",
+ "+-----------+----------+--------------------------------+",
+ ];
+ assert_batches_sorted_eq!(&expected, &data);
+}
+
+// Ensure an ingester respects the per-partition persist watermark, skipping
+// already applied ops.
+#[tokio::test]
+async fn test_skip_previously_applied_partition_ops() {
+ let mut ctx = TestContext::new().await;
+
+ // Place some writes into the write buffer.
+ let ns = ctx.ensure_namespace("test_namespace").await;
+ let partition_key = PartitionKey::from("1970-01-01");
+ ctx.write_lp(
+ "test_namespace",
+ "bananas greatness=\"unbounded\" 10",
+ partition_key.clone(),
+ 5,
+ )
+ .await;
+ let w2 = ctx
+ .write_lp(
+ "test_namespace",
+ "bananas greatness=\"amazing\",platanos=42 20",
+ partition_key.clone(),
+ 10,
+ )
+ .await;
+
+ // Wait for the writes to be processed.
+ ctx.wait_for_readable(w2).await;
+
+ // Assert the data in memory.
+ let data = ctx
+ .query(IngesterQueryRequest {
+ namespace: "test_namespace".to_string(),
+ table: "bananas".to_string(),
+ columns: vec![],
+ predicate: None,
+ })
+ .await
+ .expect("query should succeed")
+ .into_record_batches()
+ .await;
+
+ let expected = vec![
+ "+-----------+----------+--------------------------------+",
+ "| greatness | platanos | time |",
+ "+-----------+----------+--------------------------------+",
+ "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |",
+ "| unbounded | | 1970-01-01T00:00:00.000000010Z |",
+ "+-----------+----------+--------------------------------+",
+ ];
+ assert_batches_sorted_eq!(&expected, &data);
+
+ // Read the partition ID of the writes above.
+ let partitions = ctx
+ .catalog()
+ .repositories()
+ .await
+ .partitions()
+ .list_by_namespace(ns.id)
+ .await
+ .unwrap();
+ assert_matches!(&*partitions, &[Partition { .. }]);
+
+ // And set the per-partition persist marker after the first write, but
+ // before the second.
+ ctx.catalog()
+ .repositories()
+ .await
+ .partitions()
+ .update_persisted_sequence_number(partitions[0].id, SequenceNumber::new(6))
+ .await
+ .expect("failed to update persisted marker");
+
+ // Restart the ingester, which shall seek to the shard offset of 0, and
+ // begin replaying ops.
+ ctx.restart().await;
+
+ // Wait for the second write to become readable again.
+ ctx.wait_for_readable(w2).await;
+
+ // Assert the partition replay skipped the first write.
+ let data = ctx
+ .query(IngesterQueryRequest {
+ namespace: "test_namespace".to_string(),
+ table: "bananas".to_string(),
+ columns: vec![],
+ predicate: None,
+ })
+ .await
+ .expect("query should succeed")
+ .into_record_batches()
+ .await;
+
+ let expected = vec![
+ "+-----------+----------+--------------------------------+",
+ "| greatness | platanos | time |",
+ "+-----------+----------+--------------------------------+",
+ "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |",
+ "+-----------+----------+--------------------------------+",
+ ];
+ assert_batches_sorted_eq!(&expected, &data);
+}
+
+// Ensure a seek beyond the actual data available (i.e. into the future) causes
+// a panic to bring about a human response.
+#[tokio::test]
+#[should_panic = "attempted to seek to offset 42, but current high watermark for partition 0 is 0"]
+async fn test_seek_beyond_available_data() {
+ let mut ctx = TestContext::new().await;
+
+ // Place a write into the write buffer so it is not empty.
+ ctx.ensure_namespace("test_namespace").await;
+ ctx.write_lp(
+ "test_namespace",
+ "bananas greatness=\"unbounded\" 10",
+ PartitionKey::from("1970-01-01"),
+ 0,
+ )
+ .await;
+
+ // Update the catalog state, causing the next boot of the ingester to seek
+ // past the write, beyond valid data offsets.
+ ctx.catalog()
+ .repositories()
+ .await
+ .shards()
+ .update_min_unpersisted_sequence_number(ctx.shard_id(), SequenceNumber::new(42))
+ .await
+ .expect("failed to update persisted marker");
+
+ // Restart the ingester.
+ ctx.restart().await;
+}
+
+// Ensure an ingester configured to resume from offset 1 correctly seeks to the
+// oldest available data when that offset no longer exists.
+#[tokio::test]
+async fn test_seek_dropped_offset() {
+ let mut ctx = TestContext::new().await;
+
+ // Place a write into the write buffer so it is not empty.
+ ctx.ensure_namespace("test_namespace").await;
+
+ // A write at offset 42
+ let w1 = ctx
+ .write_lp(
+ "test_namespace",
+ "bananas greatness=\"unbounded\" 10",
+ PartitionKey::from("1970-01-01"),
+ 42,
+ )
+ .await;
+
+ // Configure the ingester to seek to offset 1, which does not exist.
+ ctx.catalog()
+ .repositories()
+ .await
+ .shards()
+ .update_min_unpersisted_sequence_number(ctx.shard_id(), SequenceNumber::new(1))
+ .await
+ .expect("failed to update persisted marker");
+
+ // Restart the ingester.
+ ctx.restart().await;
+
+ // Wait for the op to be applied
+ ctx.wait_for_readable(w1).await;
+
+ // Assert the data in memory now contains only w2.
+ let data = ctx
+ .query(IngesterQueryRequest {
+ namespace: "test_namespace".to_string(),
+ table: "bananas".to_string(),
+ columns: vec![],
+ predicate: None,
+ })
+ .await
+ .expect("query should succeed")
+ .into_record_batches()
+ .await;
+
+ let expected = vec![
+ "+-----------+--------------------------------+",
+ "| greatness | time |",
+ "+-----------+--------------------------------+",
+ "| unbounded | 1970-01-01T00:00:00.000000010Z |",
+ "+-----------+--------------------------------+",
+ ];
+ assert_batches_sorted_eq!(&expected, &data);
+
+ // Ensure the metric was set to cause an alert for potential data loss.
+ let metric = ctx
+ .get_metric::<U64Counter, _>(
+ "shard_reset_count",
+ &[
+ ("kafka_topic", TEST_TOPIC_NAME),
+ ("kafka_partition", "0"),
+ ("potential_data_loss", "true"),
+ ],
+ )
+ .fetch();
+ assert!(metric > 0);
+}
|
5aee3767666be6bddd782a7e61884bbca6a00613
|
Dom Dwyer
|
2023-08-24 22:41:02
|
initialise gossip subsystem
|
Optionally initialise the gossip subsystem in the compactor.
This will cause the compactor to perform PEX (peer exchange) and join the cluster, but
as it registers no topic interests, it will not receive any
application-level payloads.
No messages are currently sent (in fact, gossip shuts down immediately).
| null |
feat(compactor): initialise gossip subsystem
Optionally initialise the gossip subsystem in the compactor.
This will cause the compactor to perform PEX (peer exchange) and join the cluster, but
as it registers no topic interests, it will not receive any
application-level payloads.
No messages are currently sent (in fact, gossip shuts down immediately).
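For orientation, the initialisation pattern the patch adds boils down to the sketch below. It is condensed from the diff that follows (the gossip, gossip_compaction and generated_types items are the ones used there); the seed list, bind address and metric registry are assumed to come from the compactor config, and the free function is only an illustration of the wiring, not the actual start-up code.

use std::sync::Arc;

use generated_types::influxdata::iox::gossip::{v1::CompactionEvent, Topic};
use gossip::{NopDispatcher, TopicInterests};
use gossip_compaction::tx::CompactionEventTx;

// Condensed sketch of the gossip initialisation added in the diff below.
async fn init_gossip(
    seeds: Vec<String>,
    bind: std::net::SocketAddr,
    metrics: Arc<metric::Registry>,
) -> CompactionEventTx<CompactionEvent> {
    let handle = gossip::Builder::<_, Topic>::new(seeds, NopDispatcher::default(), metrics)
        // Register no topic interests: the compactor only ever sends events,
        // so it joins the cluster (via PEX) without receiving payloads.
        .with_topic_filter(TopicInterests::default())
        .bind(bind)
        .await
        .expect("failed to start gossip reactor");

    // Wrap the reactor handle in a typed sender for compaction events.
    CompactionEventTx::<CompactionEvent>::new(handle)
}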
|
diff --git a/Cargo.lock b/Cargo.lock
index f32997849d..fcfedf2567 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -985,6 +985,9 @@ dependencies = [
"data_types",
"datafusion",
"futures",
+ "generated_types",
+ "gossip",
+ "gossip_compaction",
"insta",
"iox_catalog",
"iox_query",
diff --git a/clap_blocks/src/compactor.rs b/clap_blocks/src/compactor.rs
index d634f8adbc..55ab6c15d8 100644
--- a/clap_blocks/src/compactor.rs
+++ b/clap_blocks/src/compactor.rs
@@ -2,13 +2,17 @@
use std::num::NonZeroUsize;
-use crate::memory_size::MemorySize;
+use crate::{gossip::GossipConfig, memory_size::MemorySize};
use super::compactor_scheduler::CompactorSchedulerConfig;
/// CLI config for compactor
#[derive(Debug, Clone, clap::Parser)]
pub struct CompactorConfig {
+ /// Gossip config.
+ #[clap(flatten)]
+ pub gossip_config: GossipConfig,
+
/// Configuration for the compactor scheduler
#[clap(flatten)]
pub compactor_scheduler_config: CompactorSchedulerConfig,
diff --git a/compactor/Cargo.toml b/compactor/Cargo.toml
index 938c578d51..bb985aedad 100644
--- a/compactor/Cargo.toml
+++ b/compactor/Cargo.toml
@@ -14,6 +14,9 @@ compactor_scheduler = { path = "../compactor_scheduler" }
data_types = { path = "../data_types" }
datafusion = { workspace = true }
futures = "0.3"
+generated_types = { version = "0.1.0", path = "../generated_types" }
+gossip = { version = "0.1.0", path = "../gossip" }
+gossip_compaction = { version = "0.1.0", path = "../gossip_compaction" }
iox_catalog = { path = "../iox_catalog" }
iox_query = { path = "../iox_query" }
iox_time = { path = "../iox_time" }
diff --git a/compactor/src/compactor.rs b/compactor/src/compactor.rs
index be5f4c9de2..72063e7a4b 100644
--- a/compactor/src/compactor.rs
+++ b/compactor/src/compactor.rs
@@ -5,6 +5,8 @@ use futures::{
future::{BoxFuture, Shared},
FutureExt, TryFutureExt,
};
+use generated_types::influxdata::iox::gossip::{v1::CompactionEvent, Topic};
+use gossip::{NopDispatcher, TopicInterests};
use observability_deps::tracing::{info, warn};
use tokio::task::{JoinError, JoinHandle};
use tokio_util::sync::CancellationToken;
@@ -36,7 +38,7 @@ pub struct Compactor {
impl Compactor {
/// Start compactor.
- pub fn start(config: Config) -> Self {
+ pub async fn start(config: Config) -> Self {
info!("compactor starting");
log_config(&config);
@@ -52,6 +54,30 @@ impl Compactor {
));
let df_semaphore = Arc::new(semaphore_metrics.new_semaphore(config.df_concurrency.get()));
+ // Initialise the gossip subsystem, if configured.
+ let _gossip = match config.gossip_bind_address {
+ Some(bind) => {
+ // Initialise the gossip subsystem.
+ let handle = gossip::Builder::<_, Topic>::new(
+ config.gossip_seeds,
+ NopDispatcher::default(),
+ Arc::clone(&config.metric_registry),
+ )
+ // Configure the compactor to subscribe to no topics - it
+ // currently only sends events.
+ .with_topic_filter(TopicInterests::default())
+ .bind(bind)
+ .await
+ .expect("failed to start gossip reactor");
+
+ let event_tx =
+ gossip_compaction::tx::CompactionEventTx::<CompactionEvent>::new(handle);
+
+ Some(event_tx)
+ }
+ None => None,
+ };
+
let worker = tokio::spawn(async move {
tokio::select! {
_ = shutdown_captured.cancelled() => {}
diff --git a/compactor/src/components/report.rs b/compactor/src/components/report.rs
index 1452bf088a..e5709b3968 100644
--- a/compactor/src/components/report.rs
+++ b/compactor/src/components/report.rs
@@ -38,6 +38,8 @@ pub fn log_config(config: &Config) {
max_num_columns_per_table,
max_num_files_per_plan,
max_partition_fetch_queries_per_second,
+ gossip_bind_address,
+ gossip_seeds,
} = &config;
let parquet_files_sink_override = parquet_files_sink_override
@@ -70,6 +72,8 @@ pub fn log_config(config: &Config) {
max_num_columns_per_table,
max_num_files_per_plan,
max_partition_fetch_queries_per_second,
+ ?gossip_bind_address,
+ ?gossip_seeds,
"config",
);
}
diff --git a/compactor/src/config.rs b/compactor/src/config.rs
index b520e09a97..b8ceb86639 100644
--- a/compactor/src/config.rs
+++ b/compactor/src/config.rs
@@ -1,5 +1,5 @@
//! Config-related stuff.
-use std::{num::NonZeroUsize, sync::Arc, time::Duration};
+use std::{net::SocketAddr, num::NonZeroUsize, sync::Arc, time::Duration};
use backoff::BackoffConfig;
use compactor_scheduler::SchedulerConfig;
@@ -135,6 +135,14 @@ pub struct Config {
///
/// Queries are smoothed over the full second.
pub max_partition_fetch_queries_per_second: Option<usize>,
+
+ /// The optional bind address for node to use for gossip communication.
+ pub gossip_bind_address: Option<SocketAddr>,
+
+ /// Gossip seed addresses.
+ ///
+ /// Only used if `gossip_bind_address` is `Some`.
+ pub gossip_seeds: Vec<String>,
}
impl Config {
diff --git a/compactor_test_utils/src/lib.rs b/compactor_test_utils/src/lib.rs
index df03231d2b..efa5da2cf0 100644
--- a/compactor_test_utils/src/lib.rs
+++ b/compactor_test_utils/src/lib.rs
@@ -169,6 +169,8 @@ impl TestSetupBuilder<false> {
max_num_columns_per_table: 200,
max_num_files_per_plan: 200,
max_partition_fetch_queries_per_second: None,
+ gossip_bind_address: None,
+ gossip_seeds: vec![],
};
let bytes_written = Arc::new(AtomicUsize::new(0));
diff --git a/ioxd_compactor/src/lib.rs b/ioxd_compactor/src/lib.rs
index f6d9fb2e9c..337e4d50c7 100644
--- a/ioxd_compactor/src/lib.rs
+++ b/ioxd_compactor/src/lib.rs
@@ -189,7 +189,10 @@ pub async fn create_compactor_server_type(
max_num_files_per_plan: compactor_config.max_num_files_per_plan,
max_partition_fetch_queries_per_second: compactor_config
.max_partition_fetch_queries_per_second,
- });
+ gossip_seeds: compactor_config.gossip_config.seed_list,
+ gossip_bind_address: compactor_config.gossip_config.gossip_bind_address,
+ })
+ .await;
Arc::new(CompactorServerType::new(
compactor,
|
71ffc92559a74a8d3561cd4e90f756980ae658a0
|
Marco Neumann
|
2022-11-18 09:56:11
|
only push safe select expressions through de-dup (#6156)
|
* fix: only push safe select expressions through de-dup
Fixes #6066.
* docs: improve
Co-authored-by: Andrew Lamb <[email protected]>
* fix: rebase
* test: ensure we do not split ORs
|
Co-authored-by: Andrew Lamb <[email protected]>
|
fix: only push safe select expressions through de-dup (#6156)
* fix: only push safe select expressions through de-dup
Fixes #6066.
* docs: improve
Co-authored-by: Andrew Lamb <[email protected]>
* fix: rebase
* test: ensure we do not split ORs
Co-authored-by: Andrew Lamb <[email protected]>
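In short, push_through_dedup keeps only row-based clauses that reference primary-key columns (tags and time); everything else must wait until after deduplication, because a field predicate could drop the newer duplicate row and let a stale value win. The snippet below is condensed from the "simple case" unit test added in the patch (imports as in that test module); it illustrates the rule and is not a new API.

// Condensed from the "simple case" test in the diff below. The schema has
// tags `tag1`/`tag2`, fields `field1`/`field2`, and a timestamp column.
let schema = SchemaBuilder::default()
    .tag("tag1")
    .tag("tag2")
    .field("field1", ArrowDataType::Float64)
    .unwrap()
    .field("field2", ArrowDataType::Float64)
    .unwrap()
    .timestamp()
    .build()
    .unwrap();

let pred = Predicate {
    field_columns: None,
    range: None,
    exprs: vec![
        col("tag1").eq(lit("foo")), // tag column (primary key): kept
        col("field1").eq(lit(1.0)), // field column: dropped
        col("time").eq(lit(1)),     // time column (primary key): kept
    ],
    value_expr: vec![],
};

// Only the primary-key clauses survive, so they are safe to evaluate before
// DeduplicateExec: they keep or drop *all* rows sharing a primary key, and
// therefore cannot change which duplicate "wins".
let safe = pred.push_through_dedup(&schema);
assert_eq!(safe.exprs, vec![col("tag1").eq(lit("foo")), col("time").eq(lit(1))]);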
|
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index 9b2e6341c6..7f57af82f7 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -519,7 +519,7 @@ impl Deduplicater {
table_name: Arc<str>,
output_schema: Arc<Schema>,
chunks: Vec<Arc<dyn QueryChunk>>,
- predicate: Predicate,
+ mut predicate: Predicate,
output_sort_key: Option<SortKey>,
) -> Result<Arc<dyn ExecutionPlan>> {
// find overlapped chunks and put them into the right group
@@ -532,6 +532,14 @@ impl Deduplicater {
// the chunks have neither overlaps nor duplicates
if !self.deduplication {
debug!(%table_name, "Deduplication is disable. Build only one scan node for all of them.");
+
+ // If we do NOT run de-dup, then we also cannot apply all predicates because we have to assume that the
+ // output of this plan is later fed through de-dup. Currently (2022-11-16) this does not matter because
+ // selection is not used within the ingester, but it will become an issue once this is hooked up.
+ predicate = predicate.push_through_dedup(&Self::compute_chunks_schema(
+ &chunks,
+ &mut self.schema_interner,
+ ));
}
if chunks.no_duplicates() {
debug!(%table_name, "All chunks neither overlap nor duplicate. Build only one scan node for all of them.");
@@ -824,6 +832,9 @@ impl Deduplicater {
// Note that we may need to sort/deduplicate based on tag
// columns which do not appear in the output
+ let predicate =
+ predicate.push_through_dedup(&Self::compute_chunks_schema(&chunks, schema_interner));
+
// We need to sort chunks before creating the execution plan. For that, the chunk order is used. Since the order
// only sorts overlapping chunks, we also use the chunk ID for deterministic outputs.
let chunks = {
@@ -910,6 +921,11 @@ impl Deduplicater {
output_sort_key: &SortKey,
schema_interner: &mut SchemaInterner,
) -> Result<Arc<dyn ExecutionPlan>> {
+ // This will practically never matter because this can only happen for in-memory chunks which are currently
+ // backed by RecordBatches and these don't do anything with the predicate at all. However to prevent weird
+ // future issues, we still transform the predicate here. (@crepererum, 2022-11-16)
+ let predicate = predicate.push_through_dedup(&chunk.schema());
+
let pk_schema = Self::compute_pk_schema(&[Arc::clone(&chunk)], schema_interner);
let input_schema = Self::compute_input_schema(&output_schema, &pk_schema, schema_interner);
@@ -1323,6 +1339,19 @@ impl Deduplicater {
schema_merger.build()
}
+ // Compute schema for all chunks
+ fn compute_chunks_schema<'a>(
+ chunks: impl IntoIterator<Item = &'a Arc<dyn QueryChunk>>,
+ schema_interner: &mut SchemaInterner,
+ ) -> Arc<Schema> {
+ let mut schema_merger = SchemaMerger::new().with_interner(schema_interner);
+ for chunk in chunks {
+ schema_merger = schema_merger.merge(&chunk.schema()).unwrap();
+ }
+
+ schema_merger.build()
+ }
+
/// Find columns required to read from each scan: the output columns + the
/// primary key columns
fn compute_input_schema(
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs
index 99fff2c386..1bf4b8cb2a 100644
--- a/predicate/src/lib.rs
+++ b/predicate/src/lib.rs
@@ -23,7 +23,12 @@ use arrow::{
use data_types::{InfluxDbType, TableSummary, TimestampRange};
use datafusion::{
error::DataFusionError,
- logical_expr::{binary_expr, utils::expr_to_columns, BinaryExpr, Operator},
+ logical_expr::{
+ binary_expr,
+ expr_visitor::{ExprVisitable, ExpressionVisitor, Recursion},
+ utils::expr_to_columns,
+ BinaryExpr, Operator,
+ },
optimizer::utils::split_conjunction,
physical_optimizer::pruning::{PruningPredicate, PruningStatistics},
prelude::{col, lit_timestamp_nano, Expr},
@@ -484,6 +489,54 @@ impl Predicate {
self
}
+ /// Remove any clauses of this predicate that can not be run before deduplication.
+ ///
+ /// See <https://github.com/influxdata/influxdb_iox/issues/6066> for more details.
+ ///
+ /// Only expressions that are row-based and refer to primary key columns (and constants)
+ /// can be evaluated prior to deduplication.
+ ///
+ /// If a predicate can filter out some but not all of the rows with
+ /// the same primary key, it may filter out the row that should have been updated
+ /// allowing the original through, producing incorrect results.
+ ///
+ /// Any predicate that operates solely on primary key columns will either pass or filter
+ /// all rows with that primary key and thus is safe to push through.
+ pub fn push_through_dedup(self, schema: &schema::Schema) -> Self {
+ let pk: HashSet<_> = schema.primary_key().into_iter().collect();
+
+ let exprs = self
+ .exprs
+ .iter()
+ .flat_map(split_conjunction)
+ .filter(|expr| {
+ let mut columns = HashSet::default();
+ if expr_to_columns(expr, &mut columns).is_err() {
+ // bail out, do NOT include this weird expression
+ return false;
+ }
+
+ // check if all columns are part of the primary key
+ if !columns.into_iter().all(|c| pk.contains(c.name.as_str())) {
+ return false;
+ }
+
+ expr.accept(RowBasedVisitor::default())
+ .expect("never fails")
+ .row_based
+ })
+ .cloned()
+ .collect();
+
+ Self {
+ // can always push time range through de-dup because it is a primary keys set operation
+ range: self.range,
+ exprs,
+ field_columns: None,
+ value_expr: vec![],
+ }
+ }
+
/// Adds only the expressions from `filters` that can be pushed down to
/// execution engines.
pub fn with_pushdown_exprs(mut self, filters: &[Expr]) -> Self {
@@ -587,12 +640,69 @@ impl From<ValueExpr> for Expr {
}
}
+/// Recursively walk an expression tree, checking if the expression is row-based.
+struct RowBasedVisitor {
+ row_based: bool,
+}
+
+impl Default for RowBasedVisitor {
+ fn default() -> Self {
+ Self { row_based: true }
+ }
+}
+
+impl ExpressionVisitor for RowBasedVisitor {
+ fn pre_visit(mut self, expr: &Expr) -> Result<Recursion<Self>, DataFusionError> {
+ match expr {
+ Expr::Alias(_, _)
+ | Expr::Between { .. }
+ | Expr::BinaryExpr { .. }
+ | Expr::Case { .. }
+ | Expr::Cast { .. }
+ | Expr::Column(_)
+ | Expr::Exists { .. }
+ | Expr::GetIndexedField { .. }
+ | Expr::ILike { .. }
+ | Expr::InList { .. }
+ | Expr::InSubquery { .. }
+ | Expr::IsFalse(_)
+ | Expr::IsNotFalse(_)
+ | Expr::IsNotNull(_)
+ | Expr::IsNotTrue(_)
+ | Expr::IsNotUnknown(_)
+ | Expr::IsNull(_)
+ | Expr::IsTrue(_)
+ | Expr::IsUnknown(_)
+ | Expr::Like { .. }
+ | Expr::Literal(_)
+ | Expr::Negative(_)
+ | Expr::Not(_)
+ | Expr::QualifiedWildcard { .. }
+ | Expr::ScalarFunction { .. }
+ | Expr::ScalarSubquery(_)
+ | Expr::ScalarUDF { .. }
+ | Expr::ScalarVariable(_, _)
+ | Expr::SimilarTo { .. }
+ | Expr::Sort { .. }
+ | Expr::TryCast { .. }
+ | Expr::Wildcard => Ok(Recursion::Continue(self)),
+ Expr::AggregateFunction { .. }
+ | Expr::AggregateUDF { .. }
+ | Expr::GroupingSet(_)
+ | Expr::WindowFunction { .. } => {
+ self.row_based = false;
+ Ok(Recursion::Stop(self))
+ }
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
use arrow::datatypes::DataType as ArrowDataType;
use data_types::{ColumnSummary, InfluxDbType, StatValues, MAX_NANO_TIME, MIN_NANO_TIME};
- use datafusion::prelude::{col, lit};
+ use datafusion::prelude::{col, cube, lit};
use schema::builder::SchemaBuilder;
use test_helpers::maybe_start_logging;
@@ -934,4 +1044,120 @@ mod tests {
PredicateMatch::Zero,
);
}
+
+ #[test]
+ fn test_push_through_dedup() {
+ let schema = SchemaBuilder::default()
+ .tag("tag1")
+ .tag("tag2")
+ .field("field1", ArrowDataType::Float64)
+ .unwrap()
+ .field("field2", ArrowDataType::Float64)
+ .unwrap()
+ .timestamp()
+ .build()
+ .unwrap();
+
+ // no-op predicate
+ assert_eq!(
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![],
+ value_expr: vec![],
+ }
+ .push_through_dedup(&schema),
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![],
+ value_expr: vec![],
+ },
+ );
+
+ // simple case
+ assert_eq!(
+ Predicate {
+ field_columns: Some(BTreeSet::from([
+ String::from("tag1"),
+ String::from("field1"),
+ String::from("time"),
+ ])),
+ range: Some(TimestampRange::new(42, 1337)),
+ exprs: vec![
+ col("tag1").eq(lit("foo")),
+ col("field1").eq(lit(1.0)), // filtered out
+ col("time").eq(lit(1)),
+ ],
+ value_expr: vec![ValueExpr::try_from(col("_value").eq(lit(1.0))).unwrap()],
+ }
+ .push_through_dedup(&schema),
+ Predicate {
+ field_columns: None,
+ range: Some(TimestampRange::new(42, 1337)),
+ exprs: vec![col("tag1").eq(lit("foo")), col("time").eq(lit(1)),],
+ value_expr: vec![],
+ },
+ );
+
+ // disassemble AND
+ assert_eq!(
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![col("tag1")
+ .eq(lit("foo"))
+ .and(col("field1").eq(lit(1.0)))
+ .and(col("time").eq(lit(1))),],
+ value_expr: vec![],
+ }
+ .push_through_dedup(&schema),
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![col("tag1").eq(lit("foo")), col("time").eq(lit(1)),],
+ value_expr: vec![],
+ },
+ );
+
+ // filter no-row operations
+ assert_eq!(
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![
+ col("tag1").eq(lit("foo")),
+ cube(vec![col("time").eq(lit(1))]),
+ ],
+ value_expr: vec![],
+ }
+ .push_through_dedup(&schema),
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![col("tag1").eq(lit("foo"))],
+ value_expr: vec![],
+ },
+ );
+
+ // do NOT disassemble OR
+ assert_eq!(
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![col("tag1")
+ .eq(lit("foo"))
+ .or(col("field1").eq(lit(1.0)))
+ .or(col("time").eq(lit(1))),],
+ value_expr: vec![],
+ }
+ .push_through_dedup(&schema),
+ Predicate {
+ field_columns: None,
+ range: None,
+ exprs: vec![],
+ value_expr: vec![],
+ },
+ );
+ }
}
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet.expected b/query_tests/cases/in/dedup_and_predicates_parquet.expected
new file mode 100644
index 0000000000..58684b8cea
--- /dev/null
+++ b/query_tests/cases/in/dedup_and_predicates_parquet.expected
@@ -0,0 +1,131 @@
+-- Test Setup: TwoChunksDedupWeirdnessParquet
+-- SQL: SELECT * FROM "table" ORDER BY tag;
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
+| | 1 | B | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" ORDER BY tag;
+-- Results After Normalizing UUIDs
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | TableScan: table projection=[bar, foo, tag, time] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[bar, foo, tag, time] |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE tag='A';
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A';
+-- Results After Normalizing UUIDs
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1, projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1, projection=[bar, foo, tag, time] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2;
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2;
+-- Results After Normalizing UUIDs
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: foo@1 = 1 AND bar@0 = 2 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[bar, foo, tag, time] |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
+| | 1 | B | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+-- Results After Normalizing UUIDs
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_min@0 <= 0 AND 0 <= time_max@1, projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time_min@0 <= 0 AND 0 <= time_max@1, projection=[bar, foo, tag, time] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+-- Results After Normalizing UUIDs
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, projection=[bar, foo, tag, time] |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet.sql b/query_tests/cases/in/dedup_and_predicates_parquet.sql
new file mode 100644
index 0000000000..0e03a3bf74
--- /dev/null
+++ b/query_tests/cases/in/dedup_and_predicates_parquet.sql
@@ -0,0 +1,47 @@
+-- Illustrate the issue reported in <https://github.com/influxdata/influxdb_iox/issues/6066>.
+-- IOX_SETUP: TwoChunksDedupWeirdnessParquet
+
+
+--------------------------------------------------------------------------------
+-- query everything data
+SELECT * FROM "table" ORDER BY tag;
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" ORDER BY tag;
+
+
+--------------------------------------------------------------------------------
+-- predicates on tags
+SELECT * FROM "table" WHERE tag='A';
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE tag='A';
+
+
+--------------------------------------------------------------------------------
+-- predicates on fields
+SELECT * FROM "table" WHERE foo=1 AND bar=2;
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2;
+
+
+--------------------------------------------------------------------------------
+-- predicates on time
+SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+
+
+--------------------------------------------------------------------------------
+-- mixed predicates
+SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
new file mode 100644
index 0000000000..2d3e23e811
--- /dev/null
+++ b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
@@ -0,0 +1,136 @@
+-- Test Setup: TwoChunksDedupWeirdnessParquetIngester
+-- SQL: SELECT * FROM "table" ORDER BY tag;
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
+| | 1 | B | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" ORDER BY tag;
+-- Results After Normalizing UUIDs
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | TableScan: table projection=[bar, foo, tag, time] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE tag='A';
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A';
+-- Results After Normalizing UUIDs
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1, projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2;
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2;
+-- Results After Normalizing UUIDs
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: foo@1 = 1 AND bar@0 = 2 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
+| | 1 | B | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+-- Results After Normalizing UUIDs
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_min@0 <= 0 AND 0 <= time_max@1, projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+-- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
++-----+-----+-----+----------------------+
+| bar | foo | tag | time |
++-----+-----+-----+----------------------+
+| 2 | 1 | A | 1970-01-01T00:00:00Z |
++-----+-----+-----+----------------------+
+-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+-- Results After Normalizing UUIDs
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.sql b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.sql
new file mode 100644
index 0000000000..5ab1efa2d9
--- /dev/null
+++ b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.sql
@@ -0,0 +1,47 @@
+-- Illustrate the issue reported in <https://github.com/influxdata/influxdb_iox/issues/6066>.
+-- IOX_SETUP: TwoChunksDedupWeirdnessParquetIngester
+
+
+--------------------------------------------------------------------------------
+-- query everything data
+SELECT * FROM "table" ORDER BY tag;
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" ORDER BY tag;
+
+
+--------------------------------------------------------------------------------
+-- predicates on tags
+SELECT * FROM "table" WHERE tag='A';
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE tag='A';
+
+
+--------------------------------------------------------------------------------
+-- predicates on fields
+SELECT * FROM "table" WHERE foo=1 AND bar=2;
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2;
+
+
+--------------------------------------------------------------------------------
+-- predicates on time
+SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+
+
+--------------------------------------------------------------------------------
+-- mixed predicates
+SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+
+-- explain the above
+-- IOX_COMPARE: uuid
+EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
diff --git a/query_tests/src/cases.rs b/query_tests/src/cases.rs
index 89bb5032ee..90ec72c5c2 100644
--- a/query_tests/src/cases.rs
+++ b/query_tests/src/cases.rs
@@ -20,6 +20,38 @@ async fn test_cases_basic_sql() {
.expect("flush worked");
}
+#[tokio::test]
+// Tests from "dedup_and_predicates_parquet.sql",
+async fn test_cases_dedup_and_predicates_parquet_sql() {
+ test_helpers::maybe_start_logging();
+
+ let input_path = Path::new("cases").join("in").join("dedup_and_predicates_parquet.sql");
+ let mut runner = Runner::new();
+ runner
+ .run(input_path)
+ .await
+ .expect("test failed");
+ runner
+ .flush()
+ .expect("flush worked");
+}
+
+#[tokio::test]
+// Tests from "dedup_and_predicates_parquet_ingester.sql",
+async fn test_cases_dedup_and_predicates_parquet_ingester_sql() {
+ test_helpers::maybe_start_logging();
+
+ let input_path = Path::new("cases").join("in").join("dedup_and_predicates_parquet_ingester.sql");
+ let mut runner = Runner::new();
+ runner
+ .run(input_path)
+ .await
+ .expect("test failed");
+ runner
+ .flush()
+ .expect("flush worked");
+}
+
#[tokio::test]
// Tests from "duplicates_ingester.sql",
async fn test_cases_duplicates_ingester_sql() {
diff --git a/query_tests/src/scenarios.rs b/query_tests/src/scenarios.rs
index a5604c58d9..78cda6ea06 100644
--- a/query_tests/src/scenarios.rs
+++ b/query_tests/src/scenarios.rs
@@ -66,6 +66,8 @@ pub fn get_all_setups() -> &'static HashMap<String, Arc<dyn DbSetup>> {
register_setup!(ManyFieldsSeveralChunks),
register_setup!(TwoChunksMissingColumns),
register_setup!(AllTypes),
+ register_setup!(TwoChunksDedupWeirdnessParquet),
+ register_setup!(TwoChunksDedupWeirdnessParquetIngester),
]
.into_iter()
.map(|(name, setup)| (name.to_string(), setup as Arc<dyn DbSetup>))
diff --git a/query_tests/src/scenarios/library.rs b/query_tests/src/scenarios/library.rs
index 31ce57eb6b..928b16cf43 100644
--- a/query_tests/src/scenarios/library.rs
+++ b/query_tests/src/scenarios/library.rs
@@ -1004,3 +1004,68 @@ impl DbSetup for PeriodsInNames {
all_scenarios_for_one_chunk(vec![], vec![], lp, "measurement.one", partition_key).await
}
}
+
+/// This re-creates <https://github.com/influxdata/influxdb_iox/issues/6066>.
+///
+/// Namely it sets up two chunks to which certain filters MUST NOT be applied prior to deduplication.
+fn two_chunks_dedup_weirdness() -> Vec<ChunkData<'static, 'static>> {
+ let partition_key = "1970-01-01T00";
+
+ let lp_lines1 = vec!["table,tag=A foo=1,bar=1 0"];
+
+ let lp_lines2 = vec!["table,tag=A bar=2 0", "table,tag=B foo=1 0"];
+
+ vec![
+ ChunkData {
+ lp_lines: lp_lines1,
+ partition_key,
+ ..Default::default()
+ },
+ ChunkData {
+ lp_lines: lp_lines2,
+ partition_key,
+ ..Default::default()
+ },
+ ]
+}
+
+#[derive(Debug)]
+pub struct TwoChunksDedupWeirdnessParquet {}
+
+#[async_trait]
+impl DbSetup for TwoChunksDedupWeirdnessParquet {
+ async fn make(&self) -> Vec<DbScenario> {
+ let chunk_data: Vec<_> = two_chunks_dedup_weirdness()
+ .into_iter()
+ .map(|cd| ChunkData {
+ chunk_stage: Some(ChunkStage::Parquet),
+ ..cd
+ })
+ .collect();
+
+ make_n_chunks_scenario(&chunk_data).await
+ }
+}
+
+#[derive(Debug)]
+pub struct TwoChunksDedupWeirdnessParquetIngester {}
+
+#[async_trait]
+impl DbSetup for TwoChunksDedupWeirdnessParquetIngester {
+ async fn make(&self) -> Vec<DbScenario> {
+ let chunk_data = two_chunks_dedup_weirdness();
+ assert_eq!(chunk_data.len(), 2);
+
+ make_n_chunks_scenario(&[
+ ChunkData {
+ chunk_stage: Some(ChunkStage::Parquet),
+ ..chunk_data[0].clone()
+ },
+ ChunkData {
+ chunk_stage: Some(ChunkStage::Ingester),
+ ..chunk_data[1].clone()
+ },
+ ])
+ .await
+ }
+}
|
061b62a09b06e36ada92da958937d77e27434b5b
|
Michael Gattozzi
|
2025-01-24 10:02:44
|
write & query via stdin, string & file (#25907)
|
This change allows *both* the write and query commands to accept input
via stdin, a string argument, or a file. Larger queries become more
practical because they can be kept in a file, and smaller writes can now
be passed directly as a string. This also makes the program behave the
way people expect, especially on Unix-based systems.
This commit also contains three tests to make sure the functionality works
as expected.
Closes #25772
Closes #25892
| null |
feat(cli): write & query via stdin, string & file (#25907)
This change allows *both* the write and query commands to accept input
via stdin, a string argument, or a file. Larger queries become more
practical because they can be kept in a file, and smaller writes can now
be passed directly as a string. This also makes the program behave the
way people expect, especially on Unix-based systems.
This commit also contains three tests to make sure the functionality works
as expected.
Closes #25772
Closes #25892
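The resolution order both commands now share is: an explicit string argument first, then -f/--file, then piped stdin (refusing to hang when stdin is a terminal). Below is a minimal, self-contained sketch of that fallback logic, assuming an error enum with the NoInput and Io variants the patch adds; the real commands keep their own error types and argument parsing.

use std::fs;
use std::io::{stdin, BufReader, IsTerminal, Read};

#[derive(Debug)]
enum Error {
    // Nothing piped in, no string given, no file path provided.
    NoInput,
    // Any I/O failure while reading the file or stdin.
    Io(std::io::Error),
}

impl From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Self::Io(e)
    }
}

fn resolve_input(arg: Option<String>, file_path: Option<String>) -> Result<String, Error> {
    if let Some(s) = arg {
        return Ok(s); // 1. an explicit string argument wins
    }
    if let Some(path) = file_path {
        return Ok(fs::read_to_string(path)?); // 2. then -f/--file
    }
    let stdin = stdin();
    // 3. otherwise require piped stdin; bail out instead of blocking on a TTY.
    if stdin.is_terminal() {
        return Err(Error::NoInput);
    }
    let mut buffer = String::new();
    BufReader::new(stdin).read_to_string(&mut buffer)?;
    Ok(buffer)
}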
|
diff --git a/influxdb3/src/commands/query.rs b/influxdb3/src/commands/query.rs
index 2a70caf705..d8f5600232 100644
--- a/influxdb3/src/commands/query.rs
+++ b/influxdb3/src/commands/query.rs
@@ -2,6 +2,8 @@ use std::str::Utf8Error;
use clap::{Parser, ValueEnum};
use secrecy::ExposeSecret;
+use std::fs;
+use std::io::{stdin, BufReader, IsTerminal, Read};
use tokio::{
fs::OpenOptions,
io::{self, AsyncWriteExt},
@@ -30,6 +32,11 @@ pub(crate) enum Error {
the output as `parquet`"
)]
NoOutputFileForParquet,
+ #[error(
+ "No input from stdin detected, no string was passed in, and no file \
+ path was given"
+ )]
+ NoInput,
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -61,8 +68,12 @@ pub struct Config {
#[clap(short = 'o', long = "output")]
output_file_path: Option<String>,
+ /// A file containing sql statements to execute
+ #[clap(short = 'f', long = "file")]
+ file_path: Option<String>,
+
/// The query string to execute
- query: Vec<String>,
+ query: Option<Vec<String>>,
}
#[derive(Debug, ValueEnum, Clone)]
@@ -82,7 +93,21 @@ pub(crate) async fn command(config: Config) -> Result<()> {
client = client.with_auth_token(t.expose_secret());
}
- let query = parse_query(config.query)?;
+ let query = if let Some(query) = config.query {
+ parse_query(query)?
+ } else if let Some(file_path) = config.file_path {
+ fs::read_to_string(file_path)?
+ } else {
+ let stdin = stdin();
+ // Checks if stdin has had data passed to it via a pipe
+ if stdin.is_terminal() {
+ return Err(Error::NoInput);
+ }
+ let mut reader = BufReader::new(stdin);
+ let mut buffer = String::new();
+ reader.read_to_string(&mut buffer)?;
+ buffer
+ };
// make the query using the client
let mut resp_bytes = match config.language {
diff --git a/influxdb3/src/commands/write.rs b/influxdb3/src/commands/write.rs
index f053d804bb..8b1fc2d6ff 100644
--- a/influxdb3/src/commands/write.rs
+++ b/influxdb3/src/commands/write.rs
@@ -1,9 +1,11 @@
+use std::{
+ fs,
+ io::{stdin, BufReader, IsTerminal, Read},
+};
+
use clap::Parser;
use secrecy::ExposeSecret;
-use tokio::{
- fs::File,
- io::{self, AsyncReadExt},
-};
+use tokio::io;
use super::common::InfluxDb3Config;
@@ -14,6 +16,18 @@ pub(crate) enum Error {
#[error("error reading file: {0}")]
Io(#[from] io::Error),
+
+ #[error("No input from stdin detected, no string was passed in, and no file path was given")]
+ NoInput,
+
+ #[error("no line protocol string provided")]
+ NoLine,
+
+ #[error(
+ "ensure that a single protocol line string is provided as the final \
+ argument, enclosed in quotes"
+ )]
+ MoreThanOne,
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
@@ -29,13 +43,16 @@ pub struct Config {
///
/// Currently, only files containing line protocol are supported.
#[clap(short = 'f', long = "file")]
- file_path: String,
+ file_path: Option<String>,
/// Flag to request the server accept partial writes
///
/// Invalid lines in the input data will be ignored by the server.
#[clap(long = "accept-partial")]
accept_partial_writes: bool,
+
+ /// Give a quoted line protocol line via the command line
+ line_protocol: Option<Vec<String>>,
}
pub(crate) async fn command(config: Config) -> Result<()> {
@@ -49,9 +66,21 @@ pub(crate) async fn command(config: Config) -> Result<()> {
client = client.with_auth_token(t.expose_secret());
}
- let mut f = File::open(config.file_path).await?;
- let mut writes = Vec::new();
- f.read_to_end(&mut writes).await?;
+ let writes = if let Some(line) = config.line_protocol {
+ parse_line(line)?
+ } else if let Some(file_path) = config.file_path {
+ fs::read_to_string(file_path)?
+ } else {
+ let stdin = stdin();
+ // Checks if stdin has had data passed to it via a pipe
+ if stdin.is_terminal() {
+ return Err(Error::NoInput);
+ }
+ let mut reader = BufReader::new(stdin);
+ let mut buffer = String::new();
+ reader.read_to_string(&mut buffer)?;
+ buffer
+ };
let mut req = client.api_v3_write_lp(database_name);
if config.accept_partial_writes {
@@ -63,3 +92,16 @@ pub(crate) async fn command(config: Config) -> Result<()> {
Ok(())
}
+
+/// Parse the user-inputted line protocol string
+/// NOTE: This is only necessary because clap will not accept a single string for a trailing arg
+fn parse_line(mut input: Vec<String>) -> Result<String> {
+ if input.is_empty() {
+ Err(Error::NoLine)?
+ }
+ if input.len() > 1 {
+ Err(Error::MoreThanOne)?
+ } else {
+ Ok(input.remove(0))
+ }
+}
diff --git a/influxdb3/tests/server/cli.rs b/influxdb3/tests/server/cli.rs
index 0f98ae391b..baa5f3855a 100644
--- a/influxdb3/tests/server/cli.rs
+++ b/influxdb3/tests/server/cli.rs
@@ -102,6 +102,29 @@ pub fn run_with_confirmation_and_err(args: &[&str]) -> String {
.into()
}
+pub fn run_with_stdin_input(input: impl Into<String>, args: &[&str]) -> String {
+ let input = input.into();
+ let mut child_process = Command::cargo_bin("influxdb3")
+ .unwrap()
+ .args(args)
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .unwrap();
+
+ let mut stdin = child_process.stdin.take().expect("failed to open stdin");
+ thread::spawn(move || {
+ stdin
+ .write_all(input.as_bytes())
+ .expect("cannot write to stdin");
+ });
+
+ String::from_utf8(child_process.wait_with_output().unwrap().stdout)
+ .unwrap()
+ .trim()
+ .into()
+}
+
// Helper function to create a temporary Python plugin file
fn create_plugin_file(code: &str) -> NamedTempFile {
let mut file = NamedTempFile::new().unwrap();
@@ -1690,3 +1713,107 @@ def process_request(influxdb3_local, query_parameters, request_headers, request_
let body = serde_json::from_str::<serde_json::Value>(&body).unwrap();
assert_eq!(body, json!({"status": "updated"}));
}
+
+#[test_log::test(tokio::test)]
+async fn write_and_query_via_stdin() {
+ let server = TestServer::spawn().await;
+ let server_addr = server.client_addr();
+ let db_name = "foo";
+ let result = run_with_stdin_input(
+ "bar,tag1=1,tag2=2 field1=1,field2=2 0",
+ &["write", "--database", db_name, "--host", &server_addr],
+ );
+ assert_eq!("success", result);
+ debug!(result = ?result, "wrote data to database");
+ let result = run_with_stdin_input(
+ "SELECT * FROM bar",
+ &["query", "--database", db_name, "--host", &server_addr],
+ );
+ debug!(result = ?result, "queried data to database");
+ assert_eq!(
+ [
+ "+--------+--------+------+------+---------------------+",
+ "| field1 | field2 | tag1 | tag2 | time |",
+ "+--------+--------+------+------+---------------------+",
+ "| 1.0 | 2.0 | 1 | 2 | 1970-01-01T00:00:00 |",
+ "+--------+--------+------+------+---------------------+",
+ ]
+ .join("\n"),
+ result
+ );
+}
+
+#[test_log::test(tokio::test)]
+async fn write_and_query_via_file() {
+ let server = TestServer::spawn().await;
+ let server_addr = server.client_addr();
+ let db_name = "foo";
+ let result = run(&[
+ "write",
+ "--database",
+ db_name,
+ "--host",
+ &server_addr,
+ "--file",
+ "tests/server/fixtures/file.lp",
+ ]);
+ assert_eq!("success", result);
+ debug!(result = ?result, "wrote data to database");
+ let result = run(&[
+ "query",
+ "--database",
+ db_name,
+ "--host",
+ &server_addr,
+ "--file",
+ "tests/server/fixtures/file.sql",
+ ]);
+ debug!(result = ?result, "queried data to database");
+ assert_eq!(
+ [
+ "+--------+--------+------+------+---------------------+",
+ "| field1 | field2 | tag1 | tag2 | time |",
+ "+--------+--------+------+------+---------------------+",
+ "| 1.0 | 2.0 | 1 | 2 | 1970-01-01T00:00:00 |",
+ "+--------+--------+------+------+---------------------+",
+ ]
+ .join("\n"),
+ result
+ );
+}
+#[test_log::test(tokio::test)]
+async fn write_and_query_via_string() {
+ let server = TestServer::spawn().await;
+ let server_addr = server.client_addr();
+ let db_name = "foo";
+ let result = run(&[
+ "write",
+ "--database",
+ db_name,
+ "--host",
+ &server_addr,
+ "bar,tag1=1,tag2=2 field1=1,field2=2 0",
+ ]);
+ assert_eq!("success", result);
+ debug!(result = ?result, "wrote data to database");
+ let result = run(&[
+ "query",
+ "--database",
+ db_name,
+ "--host",
+ &server_addr,
+ "SELECT * FROM bar",
+ ]);
+ debug!(result = ?result, "queried data to database");
+ assert_eq!(
+ [
+ "+--------+--------+------+------+---------------------+",
+ "| field1 | field2 | tag1 | tag2 | time |",
+ "+--------+--------+------+------+---------------------+",
+ "| 1.0 | 2.0 | 1 | 2 | 1970-01-01T00:00:00 |",
+ "+--------+--------+------+------+---------------------+",
+ ]
+ .join("\n"),
+ result
+ );
+}
diff --git a/influxdb3/tests/server/fixtures/file.lp b/influxdb3/tests/server/fixtures/file.lp
new file mode 100644
index 0000000000..13e7f32479
--- /dev/null
+++ b/influxdb3/tests/server/fixtures/file.lp
@@ -0,0 +1 @@
+bar,tag1=1,tag2=2 field1=1,field2=2 0
diff --git a/influxdb3/tests/server/fixtures/file.sql b/influxdb3/tests/server/fixtures/file.sql
new file mode 100644
index 0000000000..d226772d33
--- /dev/null
+++ b/influxdb3/tests/server/fixtures/file.sql
@@ -0,0 +1 @@
+SELECT * FROM bar
|
bc95c70144b3fa9a07f4a69c136dc9924a697f4f
|
Dom Dwyer
|
2023-05-22 14:44:48
|
add missing lints to service_common
|
Adds the standard lints to service_common and fixes any lint failures.
Note this doesn't include the normal "document things" lint, because
there's a load of missing docs
| null |
refactor(lints): add missing lints to service_common
Adds the standard lints to service_common and fixes any lint failures.
Note this doesn't include the normal "document things" lint, because
there's a load of missing docs
|
diff --git a/service_common/src/lib.rs b/service_common/src/lib.rs
index 34c922f372..c92e3cae1d 100644
--- a/service_common/src/lib.rs
+++ b/service_common/src/lib.rs
@@ -1,5 +1,18 @@
//! Common methods for RPC service implementations
+#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
+#![allow(clippy::clone_on_ref_ptr)]
+#![warn(
+ missing_copy_implementations,
+ missing_debug_implementations,
+ clippy::explicit_iter_loop,
+ // See https://github.com/influxdata/influxdb_iox/pull/1671
+ clippy::future_not_send,
+ clippy::clone_on_ref_ptr,
+ clippy::todo,
+ clippy::dbg_macro,
+)]
+
mod error;
pub mod planner;
pub mod test_util;
diff --git a/service_common/src/planner.rs b/service_common/src/planner.rs
index d7d38b0ef9..35aa29af4f 100644
--- a/service_common/src/planner.rs
+++ b/service_common/src/planner.rs
@@ -18,10 +18,10 @@ use predicate::rpc_predicate::InfluxRpcPredicate;
/// Query planner that plans queries on a separate threadpool.
///
-/// Query planning was, at time of writing, a single threaded
-/// affair. In order to avoid tying up the tokio executor that is
-/// handling API requests, IOx plan queries using a separate thread
-/// pool.
+/// Query planning was, at time of writing, a single threaded affair. In order
+/// to avoid tying up the tokio executor that is handling API requests, IOx plan
+/// queries using a separate thread pool.
+#[derive(Debug)]
pub struct Planner {
/// Executors (whose threadpool to use)
ctx: IOxSessionContext,
@@ -62,12 +62,11 @@ impl Planner {
.await
}
- /// Creates a plan for a `DoGet` FlightSQL message,
- /// as described on [`FlightSQLPlanner::do_get`], on a
- /// separate threadpool
+ /// Creates a plan for a `DoGet` FlightSQL message, as described on
+ /// [`FlightSQLPlanner::do_get`], on a separate threadpool
pub async fn flight_sql_do_get<N>(
&self,
- namespace_name: impl Into<String>,
+ namespace_name: impl Into<String> + Send,
namespace: Arc<N>,
cmd: FlightSQLCommand,
) -> Result<Arc<dyn ExecutionPlan>>
@@ -86,12 +85,11 @@ impl Planner {
.await
}
- /// Creates a plan for a `DoAction` FlightSQL message,
- /// as described on [`FlightSQLPlanner::do_action`], on a
- /// separate threadpool
+ /// Creates a plan for a `DoAction` FlightSQL message, as described on
+ /// [`FlightSQLPlanner::do_action`], on a separate threadpool
pub async fn flight_sql_do_action<N>(
&self,
- namespace_name: impl Into<String>,
+ namespace_name: impl Into<String> + Send,
namespace: Arc<N>,
cmd: FlightSQLCommand,
) -> Result<Bytes>
@@ -110,12 +108,12 @@ impl Planner {
.await
}
- /// Creates the response for a `GetFlightInfo` FlightSQL message
- /// as described on [`FlightSQLPlanner::get_flight_info`], on a
- /// separate threadpool.
+ /// Creates the response for a `GetFlightInfo` FlightSQL message as
+ /// described on [`FlightSQLPlanner::get_flight_info`], on a separate
+ /// threadpool.
pub async fn flight_sql_get_flight_info(
&self,
- namespace_name: impl Into<String>,
+ namespace_name: impl Into<String> + Send,
cmd: FlightSQLCommand,
) -> Result<Bytes> {
let namespace_name = namespace_name.into();
@@ -130,8 +128,8 @@ impl Planner {
.await
}
- /// Creates a plan as described on
- /// [`InfluxRpcPlanner::table_names`], on a separate threadpool
+ /// Creates a plan as described on [`InfluxRpcPlanner::table_names`], on a
+ /// separate threadpool
pub async fn table_names<N>(
&self,
namespace: Arc<N>,
@@ -152,8 +150,8 @@ impl Planner {
.await
}
- /// Creates a plan as described on
- /// [`InfluxRpcPlanner::tag_keys`], on a separate threadpool
+ /// Creates a plan as described on [`InfluxRpcPlanner::tag_keys`], on a
+ /// separate threadpool
pub async fn tag_keys<N>(
&self,
namespace: Arc<N>,
@@ -174,8 +172,8 @@ impl Planner {
.await
}
- /// Creates a plan as described on
- /// [`InfluxRpcPlanner::tag_values`], on a separate threadpool
+ /// Creates a plan as described on [`InfluxRpcPlanner::tag_values`], on a
+ /// separate threadpool
pub async fn tag_values<N>(
&self,
namespace: Arc<N>,
@@ -198,8 +196,8 @@ impl Planner {
.await
}
- /// Creates a plan as described on
- /// [`InfluxRpcPlanner::field_columns`], on a separate threadpool
+ /// Creates a plan as described on [`InfluxRpcPlanner::field_columns`], on a
+ /// separate threadpool
pub async fn field_columns<N>(
&self,
namespace: Arc<N>,
@@ -220,8 +218,8 @@ impl Planner {
.await
}
- /// Creates a plan as described on
- /// [`InfluxRpcPlanner::read_filter`], on a separate threadpool
+ /// Creates a plan as described on [`InfluxRpcPlanner::read_filter`], on a
+ /// separate threadpool
pub async fn read_filter<N>(
&self,
namespace: Arc<N>,
@@ -242,8 +240,8 @@ impl Planner {
.await
}
- /// Creates a plan as described on
- /// [`InfluxRpcPlanner::read_group`], on a separate threadpool
+ /// Creates a plan as described on [`InfluxRpcPlanner::read_group`], on a
+ /// separate threadpool
pub async fn read_group<N>(
&self,
namespace: Arc<N>,
|
3b672c223c0e298df11a85d06b588919609cfbad
|
Dom Dwyer
|
2023-01-05 18:13:58
|
SequenceNumberSet
|
Adds a space-efficient encoding of a set of SequenceNumber, backed by
roaring bitmaps.
Memory utilisation will change as the number of elements changes,
according to the underlying roaring bitmap design, but is intended to be
"relatively" cheap.
| null |
feat(data_types): SequenceNumberSet
Adds a space-efficient encoding of a set of SequenceNumber, backed by
roaring bitmaps.
Memory utilisation will change as the number of elements changes,
according to the underlying roaring bitmap design, but is intended to be
"relatively" cheap.
|
diff --git a/Cargo.lock b/Cargo.lock
index 0880f7098d..5bbc84610a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1116,6 +1116,27 @@ dependencies = [
"itertools",
]
+[[package]]
+name = "croaring"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b637fa52bae8bfe59608b632329dd3bd606126ec8af04a5cbc628f9371a545d5"
+dependencies = [
+ "byteorder",
+ "croaring-sys",
+ "libc",
+]
+
+[[package]]
+name = "croaring-sys"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f09181aa94e1b0b038a1ef581f816e1e303c1146c1504bf34ea73fe8079095f"
+dependencies = [
+ "cc",
+ "libc",
+]
+
[[package]]
name = "crossbeam-channel"
version = "0.5.6"
@@ -1278,6 +1299,7 @@ dependencies = [
name = "data_types"
version = "0.1.0"
dependencies = [
+ "croaring",
"influxdb_line_protocol",
"iox_time",
"observability_deps",
diff --git a/data_types/Cargo.toml b/data_types/Cargo.toml
index 624ee15555..764bb4147f 100644
--- a/data_types/Cargo.toml
+++ b/data_types/Cargo.toml
@@ -7,6 +7,7 @@ edition.workspace = true
license.workspace = true
[dependencies]
+croaring = "0.7.0"
influxdb_line_protocol = { path = "../influxdb_line_protocol" }
iox_time = { path = "../iox_time" }
observability_deps = { path = "../observability_deps" }
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index 3ffac44a47..1ed0ca352e 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -13,6 +13,8 @@
clippy::dbg_macro
)]
+pub mod sequence_number_set;
+
use influxdb_line_protocol::FieldValue;
use observability_deps::tracing::warn;
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
diff --git a/data_types/src/sequence_number_set.rs b/data_types/src/sequence_number_set.rs
new file mode 100644
index 0000000000..97a332a8d3
--- /dev/null
+++ b/data_types/src/sequence_number_set.rs
@@ -0,0 +1,117 @@
+//! A set of [`SequenceNumber`] instances.
+
+use crate::SequenceNumber;
+
+/// A space-efficient encoded set of [`SequenceNumber`].
+#[derive(Debug, Default, Clone)]
+pub struct SequenceNumberSet(croaring::Bitmap);
+
+impl SequenceNumberSet {
+ /// Add the specified [`SequenceNumber`] to the set.
+ pub fn add(&mut self, n: SequenceNumber) {
+ self.0.add(n.get() as _);
+ }
+
+ /// Remove the specified [`SequenceNumber`] from the set, if present.
+ ///
+ /// This is a no-op if `n` was not part of `self`.
+ pub fn remove(&mut self, n: SequenceNumber) {
+ self.0.remove(n.get() as _);
+ }
+
+ /// Add all the [`SequenceNumber`] in `other` to `self`.
+ ///
+ /// The result of this operation is the set union of both input sets.
+ pub fn add_set(&mut self, other: &Self) {
+ self.0.or_inplace(&other.0)
+ }
+
+ /// Remove all the [`SequenceNumber`] in `other` from `self`.
+ pub fn remove_set(&mut self, other: &Self) {
+ self.0.andnot_inplace(&other.0)
+ }
+
+ /// Serialise `self` into a set of bytes.
+ ///
+ /// [This document][spec] describes the serialised format.
+ ///
+ /// [spec]: https://github.com/RoaringBitmap/RoaringFormatSpec/
+ pub fn as_bytes(&self) -> Vec<u8> {
+ self.0.serialize()
+ }
+
+ /// Return true if the specified [`SequenceNumber`] has been added to
+ /// `self`.
+ pub fn contains(&self, n: SequenceNumber) -> bool {
+ self.0.contains(n.get() as _)
+ }
+
+ /// Returns the number of [`SequenceNumber`] in this set.
+ pub fn len(&self) -> u64 {
+ self.0.cardinality()
+ }
+
+ /// Return `true` if there are no [`SequenceNumber`] in this set.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ /// Return an iterator of all [`SequenceNumber`] in this set.
+ pub fn iter(&self) -> impl Iterator<Item = SequenceNumber> + '_ {
+ self.0.iter().map(|v| SequenceNumber::new(v as _))
+ }
+}
+
+/// Deserialisation method.
+impl TryFrom<&[u8]> for SequenceNumberSet {
+ type Error = String;
+
+ fn try_from(buffer: &[u8]) -> Result<Self, Self::Error> {
+ croaring::Bitmap::try_deserialize(buffer)
+ .map(SequenceNumberSet)
+ .ok_or_else(|| "invalid bitmap bytes".to_string())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_set_operations() {
+ let mut a = SequenceNumberSet::default();
+ let mut b = SequenceNumberSet::default();
+
+ // Add an element and check it is readable
+ a.add(SequenceNumber::new(1));
+ assert!(a.contains(SequenceNumber::new(1)));
+ assert_eq!(a.len(), 1);
+ assert_eq!(a.iter().collect::<Vec<_>>(), vec![SequenceNumber::new(1)]);
+ assert!(!a.contains(SequenceNumber::new(42)));
+
+ // Merging an empty set into a should not change a
+ a.add_set(&b);
+ assert_eq!(a.len(), 1);
+ assert!(a.contains(SequenceNumber::new(1)));
+
+ // Merging a non-empty set should add the new elements
+ b.add(SequenceNumber::new(2));
+ a.add_set(&b);
+ assert_eq!(a.len(), 2);
+ assert!(a.contains(SequenceNumber::new(1)));
+ assert!(a.contains(SequenceNumber::new(2)));
+
+ // Removing the set should return it to the pre-merged state.
+ a.remove_set(&b);
+ assert_eq!(a.len(), 1);
+ assert!(a.contains(SequenceNumber::new(1)));
+
+ // Removing a non-existent element should be a NOP
+ a.remove(SequenceNumber::new(42));
+ assert_eq!(a.len(), 1);
+
+ // Removing the last element should result in an empty set.
+ a.remove(SequenceNumber::new(1));
+ assert_eq!(a.len(), 0);
+ }
+}
|
cb582b859d59fec8070cde88ff2d370ef4880cb1
|
Marco Neumann
|
2023-02-02 17:29:45
|
throttle partitions that do not receive commits (#6778) (#6820)
|
This will stall the compactor and only touch each partition once because
the "unique" combo thinks that partitions never finish. This will need
more thought.
| null |
revert: throttle partitions that do not receive commits (#6778) (#6820)
This will stall the compactor and only touch each partition once because
the "unique" combo thinks that partitions never finish. This will need
more thought.
|
diff --git a/compactor2/src/compactor.rs b/compactor2/src/compactor.rs
index 8a7bdd0bfd..6b328e3364 100644
--- a/compactor2/src/compactor.rs
+++ b/compactor2/src/compactor.rs
@@ -58,6 +58,7 @@ impl Compactor2 {
_ = async {
loop {
compact(config.partition_concurrency, config.partition_timeout, Arc::clone(&job_semaphore), &components).await;
+ // TODO: implement throttling if there was no work to do
}
} => unreachable!(),
}
diff --git a/compactor2/src/components/combos/mod.rs b/compactor2/src/components/combos/mod.rs
index 7f09b8b3cf..944bec0265 100644
--- a/compactor2/src/components/combos/mod.rs
+++ b/compactor2/src/components/combos/mod.rs
@@ -1,4 +1,3 @@
//! Combinations of multiple components that together can achieve one goal.
-pub mod throttle_partition;
pub mod unique_partitions;
diff --git a/compactor2/src/components/combos/throttle_partition.rs b/compactor2/src/components/combos/throttle_partition.rs
deleted file mode 100644
index ebf28a2251..0000000000
--- a/compactor2/src/components/combos/throttle_partition.rs
+++ /dev/null
@@ -1,433 +0,0 @@
-//! Throttle partions that receive no commits.
-
-use std::{
- collections::HashMap,
- fmt::Display,
- sync::{Arc, Mutex},
- time::Duration,
-};
-
-use async_trait::async_trait;
-use data_types::{CompactionLevel, ParquetFileId, ParquetFileParams, PartitionId};
-use iox_time::{Time, TimeProvider};
-
-use crate::components::{
- commit::Commit, partition_done_sink::PartitionDoneSink, partitions_source::PartitionsSource,
-};
-
-/// Ensures that partitions that do not receive any commits are throttled.
-///
-/// This may happen because our catalog query detects that the partition receives writes but the comapctor already
-/// finished all the outstandign work.
-///
-/// This should be used as a wrapper around the actual [`PartitionsSource`] & [`Commit`] & [`PartitionDoneSink`] and will setup of
-/// the following stream layout:
-///
-/// ```text
-/// (5)
-/// ^
-/// |
-/// +----------------------------------(4)
-/// | ^
-/// V |
-/// (1)====>(2)====>[concurrent processing]---->(3)---->(6)---->(7)
-/// ^ |
-/// | |
-/// | |
-/// +-------------------------------------------+
-/// ```
-///
-/// | Step | Name | Type | Description |
-/// | ---- | --------------------- | ----------------------------------------------------------------- | ----------- |
-/// | 1 | **Actual source** | `inner_source`/`T1`/[`PartitionsSource`], wrapped | This is the actual source. |
-/// | 2 | **Throttling source** | [`ThrottlePartitionsSourceWrapper`], wraps `inner_source`/`T1` | Throttles partitions that do not receive any commits |
-/// | 3 | **Critical section** | -- | The actual partition processing |
-/// | 4 | **Throttle commit** | [`ThrottleCommitWrapper`], wraps `inner_commit`/`T2` | Observes commits. |
-/// | 5 | **Actual commit** | `inner_commit`/`T2`/[`Commit`] | The actual commit implementation |
-/// | 6 | **Throttle sink** | [`ThrottlePartitionDoneSinkWrapper`], wraps `inner_sink`/`T3` | Observes incoming IDs enables throttled if step (4) did not observe any commits. |
-/// | 7 | **Actual sink** | `inner_sink`/`T3`/[`PartitionDoneSink`], wrapped | The actual sink. |
-///
-/// This setup relies on a fact that it does not process duplicate [`PartitionId`]. You may use
-/// [`unique_partitions`](crate::components::combos::unique_partitions::unique_partitions) to achieve that.
-pub fn throttle_partition<T1, T2, T3>(
- source: T1,
- commit: T2,
- sink: T3,
- time_provider: Arc<dyn TimeProvider>,
- throttle_duration: Duration,
-) -> (
- ThrottlePartitionsSourceWrapper<T1>,
- ThrottleCommitWrapper<T2>,
- ThrottlePartitionDoneSinkWrapper<T3>,
-)
-where
- T1: PartitionsSource,
- T2: Commit,
- T3: PartitionDoneSink,
-{
- let state = SharedState::default();
- let source = ThrottlePartitionsSourceWrapper {
- inner: source,
- state: Arc::clone(&state),
- time_provider: Arc::clone(&time_provider),
- };
- let commit = ThrottleCommitWrapper {
- inner: commit,
- state: Arc::clone(&state),
- };
- let sink = ThrottlePartitionDoneSinkWrapper {
- inner: sink,
- state,
- time_provider,
- throttle_duration,
- };
- (source, commit, sink)
-}
-
-#[derive(Debug, Default)]
-struct State {
- in_flight: HashMap<PartitionId, bool>,
- throttled: HashMap<PartitionId, Time>,
-}
-
-type SharedState = Arc<Mutex<State>>;
-
-#[derive(Debug)]
-pub struct ThrottlePartitionsSourceWrapper<T>
-where
- T: PartitionsSource,
-{
- inner: T,
- state: SharedState,
- time_provider: Arc<dyn TimeProvider>,
-}
-
-impl<T> Display for ThrottlePartitionsSourceWrapper<T>
-where
- T: PartitionsSource,
-{
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "throttle({})", self.inner)
- }
-}
-
-#[async_trait]
-impl<T> PartitionsSource for ThrottlePartitionsSourceWrapper<T>
-where
- T: PartitionsSource,
-{
- async fn fetch(&self) -> Vec<PartitionId> {
- let res = self.inner.fetch().await;
- let mut guard = self.state.lock().expect("not poisoned");
-
- // ensure that in-flight data is non-overlapping
- for id in &res {
- if guard.in_flight.contains_key(id) {
- drop(guard); // avoid poison
- panic!("Partition already in-flight: {id}");
- }
- }
-
- // clean throttled states
- let now = self.time_provider.now();
- guard.throttled = guard
- .throttled
- .iter()
- .filter(|(_id, until)| **until > now)
- .map(|(k, v)| (*k, *v))
- .collect();
-
- // filter output
- let res = res
- .into_iter()
- .filter(|id| !guard.throttled.contains_key(id))
- .collect::<Vec<_>>();
-
- // set up in-flight
- for id in &res {
- guard.in_flight.insert(*id, false);
- }
-
- res
- }
-}
-
-#[derive(Debug)]
-pub struct ThrottleCommitWrapper<T>
-where
- T: Commit,
-{
- inner: T,
- state: SharedState,
-}
-
-impl<T> Display for ThrottleCommitWrapper<T>
-where
- T: Commit,
-{
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "throttle({})", self.inner)
- }
-}
-
-#[async_trait]
-impl<T> Commit for ThrottleCommitWrapper<T>
-where
- T: Commit,
-{
- async fn commit(
- &self,
- partition_id: PartitionId,
- delete: &[ParquetFileId],
- upgrade: &[ParquetFileId],
- create: &[ParquetFileParams],
- target_level: CompactionLevel,
- ) -> Vec<ParquetFileId> {
- let known = {
- let mut guard = self.state.lock().expect("not poisoned");
- match guard.in_flight.get_mut(&partition_id) {
- Some(val) => {
- *val = true;
- true
- }
- None => false,
- }
- };
- // perform check when NOT holding the mutex to not poison it
- assert!(
- known,
- "Unknown or already done partition in commit: {partition_id}"
- );
-
- self.inner
- .commit(partition_id, delete, upgrade, create, target_level)
- .await
- }
-}
-
-#[derive(Debug)]
-pub struct ThrottlePartitionDoneSinkWrapper<T>
-where
- T: PartitionDoneSink,
-{
- inner: T,
- state: SharedState,
- throttle_duration: Duration,
- time_provider: Arc<dyn TimeProvider>,
-}
-
-impl<T> Display for ThrottlePartitionDoneSinkWrapper<T>
-where
- T: PartitionDoneSink,
-{
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "throttle({})", self.inner)
- }
-}
-
-#[async_trait]
-impl<T> PartitionDoneSink for ThrottlePartitionDoneSinkWrapper<T>
-where
- T: PartitionDoneSink,
-{
- async fn record(
- &self,
- partition: PartitionId,
- res: Result<(), Box<dyn std::error::Error + Send + Sync>>,
- ) {
- let known = {
- let mut guard = self.state.lock().expect("not poisoned");
- match guard.in_flight.remove(&partition) {
- Some(val) => {
- if !val {
- guard
- .throttled
- .insert(partition, self.time_provider.now() + self.throttle_duration);
- }
- true
- }
- None => false,
- }
- };
- // perform check when NOT holding the mutex to not poison it
- assert!(
- known,
- "Unknown or already done partition in partition done sink: {partition}"
- );
-
- self.inner.record(partition, res).await;
- }
-}
-
-#[cfg(test)]
-mod tests {
- use iox_time::MockProvider;
-
- use crate::components::{
- commit::mock::{CommitHistoryEntry, MockCommit},
- partition_done_sink::mock::MockPartitionDoneSink,
- partitions_source::mock::MockPartitionsSource,
- };
-
- use super::*;
-
- #[test]
- fn test_display() {
- let (source, commit, sink) = throttle_partition(
- MockPartitionsSource::new(vec![]),
- MockCommit::new(),
- MockPartitionDoneSink::new(),
- Arc::new(MockProvider::new(Time::MIN)),
- Duration::from_secs(0),
- );
- assert_eq!(source.to_string(), "throttle(mock)");
- assert_eq!(commit.to_string(), "throttle(mock)");
- assert_eq!(sink.to_string(), "throttle(mock)");
- }
-
- #[tokio::test]
- async fn test_throttle() {
- let inner_source = Arc::new(MockPartitionsSource::new(vec![
- PartitionId::new(1),
- PartitionId::new(2),
- PartitionId::new(3),
- PartitionId::new(4),
- ]));
- let inner_commit = Arc::new(MockCommit::new());
- let inner_sink = Arc::new(MockPartitionDoneSink::new());
- let time_provider = Arc::new(MockProvider::new(Time::MIN));
- let (source, commit, sink) = throttle_partition(
- Arc::clone(&inner_source),
- Arc::clone(&inner_commit),
- Arc::clone(&inner_sink),
- Arc::clone(&time_provider) as _,
- Duration::from_secs(1),
- );
-
- assert_eq!(
- source.fetch().await,
- vec![
- PartitionId::new(1),
- PartitionId::new(2),
- PartitionId::new(3),
- PartitionId::new(4)
- ],
- );
- commit
- .commit(PartitionId::new(1), &[], &[], &[], CompactionLevel::Initial)
- .await;
- commit
- .commit(PartitionId::new(2), &[], &[], &[], CompactionLevel::Initial)
- .await;
- sink.record(PartitionId::new(1), Ok(())).await;
- sink.record(PartitionId::new(3), Ok(())).await;
-
- // need to remove partition 2 and 4 because they weren't finished yet
- inner_source.set(vec![
- PartitionId::new(1),
- PartitionId::new(3),
- PartitionId::new(5),
- ]);
- assert_eq!(
- source.fetch().await,
- vec![
- // ID 1: commit in last round => pass
- PartitionId::new(1),
- // ID 3: no commit in last round => throttled
- // ID 5: new => pass
- PartitionId::new(5),
- ],
- );
-
- // advance time to "unthrottle" ID 3
- inner_source.set(vec![PartitionId::new(3)]);
- time_provider.inc(Duration::from_secs(1));
- assert_eq!(source.fetch().await, vec![PartitionId::new(3)],);
-
- // can still finish partition 2 and 4
- sink.record(PartitionId::new(2), Err(String::from("foo").into()))
- .await;
- sink.record(PartitionId::new(4), Err(String::from("bar").into()))
- .await;
- inner_source.set(vec![PartitionId::new(2), PartitionId::new(4)]);
- assert_eq!(source.fetch().await, vec![PartitionId::new(2)],);
-
- assert_eq!(
- inner_sink.results(),
- HashMap::from([
- (PartitionId::new(1), Ok(())),
- (PartitionId::new(2), Err(String::from("foo"))),
- (PartitionId::new(3), Ok(())),
- (PartitionId::new(4), Err(String::from("bar"))),
- ]),
- );
- assert_eq!(
- inner_commit.history(),
- vec![
- CommitHistoryEntry {
- partition_id: PartitionId::new(1),
- delete: vec![],
- upgrade: vec![],
- created: vec![],
- target_level: CompactionLevel::Initial,
- },
- CommitHistoryEntry {
- partition_id: PartitionId::new(2),
- delete: vec![],
- upgrade: vec![],
- created: vec![],
- target_level: CompactionLevel::Initial,
- },
- ]
- );
- }
-
- #[tokio::test]
- #[should_panic(expected = "Unknown or already done partition in commit: 1")]
- async fn test_panic_commit_unknown() {
- let (source, commit, sink) = throttle_partition(
- MockPartitionsSource::new(vec![PartitionId::new(1)]),
- MockCommit::new(),
- MockPartitionDoneSink::new(),
- Arc::new(MockProvider::new(Time::MIN)),
- Duration::from_secs(0),
- );
-
- source.fetch().await;
- sink.record(PartitionId::new(1), Ok(())).await;
- commit
- .commit(PartitionId::new(1), &[], &[], &[], CompactionLevel::Initial)
- .await;
- }
-
- #[tokio::test]
- #[should_panic(expected = "Unknown or already done partition in partition done sink: 1")]
- async fn test_panic_sink_unknown() {
- let (source, _commit, sink) = throttle_partition(
- MockPartitionsSource::new(vec![PartitionId::new(1)]),
- MockCommit::new(),
- MockPartitionDoneSink::new(),
- Arc::new(MockProvider::new(Time::MIN)),
- Duration::from_secs(0),
- );
-
- source.fetch().await;
- sink.record(PartitionId::new(1), Ok(())).await;
- sink.record(PartitionId::new(1), Ok(())).await;
- }
-
- #[tokio::test]
- #[should_panic(expected = "Partition already in-flight: 1")]
- async fn test_panic_duplicate_in_flight() {
- let (source, _commit, _sink) = throttle_partition(
- MockPartitionsSource::new(vec![PartitionId::new(1)]),
- MockCommit::new(),
- MockPartitionDoneSink::new(),
- Arc::new(MockProvider::new(Time::MIN)),
- Duration::from_secs(0),
- );
-
- source.fetch().await;
- source.fetch().await;
- }
-}
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index edb3f6b41d..f9e4f65776 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -17,7 +17,7 @@ use crate::{
};
use super::{
- combos::{throttle_partition::throttle_partition, unique_partitions::unique_partitions},
+ combos::unique_partitions::unique_partitions,
commit::{
catalog::CatalogCommit, logging::LoggingCommitWrapper, metrics::MetricsCommitWrapper,
mock::MockCommit, Commit,
@@ -155,13 +155,6 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
let (partitions_source, partition_done_sink) =
unique_partitions(partitions_source, partition_done_sink);
- let (partitions_source, commit, partition_done_sink) = throttle_partition(
- partitions_source,
- commit,
- partition_done_sink,
- Arc::clone(&config.time_provider),
- Duration::from_secs(60),
- );
Arc::new(Components {
// Note: Place "not empty" wrapper at the very last so that the logging and metric wrapper work even when there
|
0d5b591ec94796e2b29939353ad7fdf00f95c166
|
Trevor Hilton
|
2024-04-23 12:55:30
|
point at latest core (#24937)
|
Minor core update to bring in security updates and cargo optimizations from core.
| null |
chore: point at latest core (#24937)
Minor core update to bring in security updates and cargo optimizations from core.
|
diff --git a/Cargo.lock b/Cargo.lock
index 07faad493b..deb4e26db7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -57,9 +57,9 @@ dependencies = [
[[package]]
name = "allocator-api2"
-version = "0.2.16"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
[[package]]
name = "android-tzdata"
@@ -126,9 +126,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.81"
+version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
+checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
[[package]]
name = "arc-swap"
@@ -311,7 +311,7 @@ dependencies = [
"futures",
"once_cell",
"paste",
- "prost 0.12.3",
+ "prost 0.12.4",
"tokio",
"tonic 0.10.2",
]
@@ -474,7 +474,7 @@ dependencies = [
[[package]]
name = "arrow_util"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"ahash",
"arrow",
@@ -522,22 +522,22 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
[[package]]
name = "async-channel"
-version = "2.2.0"
+version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3"
+checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928"
dependencies = [
"concurrent-queue",
- "event-listener 5.2.0",
- "event-listener-strategy",
+ "event-listener 5.3.0",
+ "event-listener-strategy 0.5.1",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-compression"
-version = "0.4.7"
+version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86a9249d1447a85f95810c620abea82e001fe58a31713fcce614caf52499f905"
+checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60"
dependencies = [
"bzip2",
"flate2",
@@ -553,11 +553,13 @@ dependencies = [
[[package]]
name = "async-lock"
-version = "2.8.0"
+version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
+checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b"
dependencies = [
- "event-listener 2.5.3",
+ "event-listener 4.0.3",
+ "event-listener-strategy 0.4.0",
+ "pin-project-lite",
]
[[package]]
@@ -579,18 +581,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
name = "async-trait"
-version = "0.1.79"
+version = "0.1.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681"
+checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -605,7 +607,7 @@ dependencies = [
[[package]]
name = "authz"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"backoff 0.1.0",
@@ -674,7 +676,7 @@ dependencies = [
[[package]]
name = "backoff"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"observability_deps",
"rand",
@@ -727,21 +729,6 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
-[[package]]
-name = "bit-set"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1"
-dependencies = [
- "bit-vec",
-]
-
-[[package]]
-name = "bit-vec"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
-
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -822,15 +809,9 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.15.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa"
-
-[[package]]
-name = "bytecount"
-version = "0.6.7"
+version = "3.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytemuck"
@@ -849,7 +830,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -885,41 +866,10 @@ dependencies = [
"pkg-config",
]
-[[package]]
-name = "camino"
-version = "1.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "cargo-platform"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "cargo_metadata"
-version = "0.14.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa"
-dependencies = [
- "camino",
- "cargo-platform",
- "semver",
- "serde",
- "serde_json",
-]
-
[[package]]
name = "catalog_cache"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"bytes",
"dashmap",
@@ -937,12 +887,13 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.90"
+version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5"
+checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b"
dependencies = [
"jobserver",
"libc",
+ "once_cell",
]
[[package]]
@@ -959,9 +910,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "chrono"
-version = "0.4.37"
+version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e"
+checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
dependencies = [
"android-tzdata",
"iana-time-zone",
@@ -969,7 +920,7 @@ dependencies = [
"num-traits",
"serde",
"wasm-bindgen",
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -1007,7 +958,7 @@ dependencies = [
[[package]]
name = "clap_blocks"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"clap",
"ed25519-dalek",
@@ -1052,7 +1003,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -1064,7 +1015,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
[[package]]
name = "client_util"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"http",
"reqwest",
@@ -1082,12 +1033,12 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "comfy-table"
-version = "7.1.0"
+version = "7.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c64043d6c7b7a4c58e39e7efccfdea7b93d885a795d0c054a69dbbf4dd52686"
+checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7"
dependencies = [
- "strum 0.25.0",
- "strum_macros 0.25.3",
+ "strum",
+ "strum_macros",
"unicode-width",
]
@@ -1217,9 +1168,9 @@ dependencies = [
[[package]]
name = "crc"
-version = "3.0.1"
+version = "3.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe"
+checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636"
dependencies = [
"crc-catalog",
]
@@ -1363,7 +1314,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -1387,7 +1338,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.10.0",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -1398,7 +1349,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
dependencies = [
"darling_core",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -1417,7 +1368,7 @@ dependencies = [
[[package]]
name = "data_types"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow-buffer",
"bytes",
@@ -1431,7 +1382,7 @@ dependencies = [
"once_cell",
"ordered-float 4.2.0",
"percent-encoding",
- "prost 0.12.3",
+ "prost 0.12.4",
"schema",
"serde_json",
"sha2",
@@ -1553,8 +1504,8 @@ dependencies = [
"datafusion-common",
"paste",
"sqlparser",
- "strum 0.26.2",
- "strum_macros 0.26.2",
+ "strum",
+ "strum_macros",
]
[[package]]
@@ -1683,7 +1634,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"object_store",
- "prost 0.12.3",
+ "prost 0.12.4",
]
[[package]]
@@ -1702,7 +1653,7 @@ dependencies = [
[[package]]
name = "datafusion_util"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"datafusion",
@@ -1786,7 +1737,7 @@ dependencies = [
[[package]]
name = "dml"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow_util",
"data_types",
@@ -1841,9 +1792,9 @@ dependencies = [
[[package]]
name = "either"
-version = "1.10.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
+checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
dependencies = [
"serde",
]
@@ -1856,9 +1807,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]]
name = "encoding_rs"
-version = "0.8.33"
+version = "0.8.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
+checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59"
dependencies = [
"cfg-if",
]
@@ -1879,15 +1830,6 @@ dependencies = [
"windows-sys 0.52.0",
]
-[[package]]
-name = "error-chain"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc"
-dependencies = [
- "version_check",
-]
-
[[package]]
name = "etcetera"
version = "0.8.0"
@@ -1907,29 +1849,50 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
[[package]]
name = "event-listener"
-version = "5.2.0"
+version = "4.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e"
+dependencies = [
+ "concurrent-queue",
+ "parking",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "event-listener"
+version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91"
+checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24"
dependencies = [
"concurrent-queue",
"parking",
"pin-project-lite",
]
+[[package]]
+name = "event-listener-strategy"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3"
+dependencies = [
+ "event-listener 4.0.3",
+ "pin-project-lite",
+]
+
[[package]]
name = "event-listener-strategy"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3"
dependencies = [
- "event-listener 5.2.0",
+ "event-listener 5.3.0",
"pin-project-lite",
]
[[package]]
name = "executor"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"futures",
"libc",
@@ -1954,9 +1917,9 @@ checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984"
[[package]]
name = "fiat-crypto"
-version = "0.2.7"
+version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f"
+checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e"
[[package]]
name = "filetime"
@@ -2017,7 +1980,7 @@ dependencies = [
[[package]]
name = "flightsql"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"arrow-flight",
@@ -2030,21 +1993,12 @@ dependencies = [
"iox_query_params",
"observability_deps",
"once_cell",
- "prost 0.12.3",
+ "prost 0.12.4",
"snafu 0.8.2",
"tonic 0.10.2",
"workspace-hack",
]
-[[package]]
-name = "float-cmp"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
-dependencies = [
- "num-traits",
-]
-
[[package]]
name = "flume"
version = "0.11.0"
@@ -2147,7 +2101,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -2183,16 +2137,16 @@ dependencies = [
[[package]]
name = "generated_types"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"observability_deps",
"once_cell",
"pbjson",
"pbjson-build",
"pbjson-types",
- "prost 0.12.3",
+ "prost 0.12.4",
"prost-build",
- "prost-types 0.12.3",
+ "prost-types 0.12.4",
"serde",
"tonic 0.10.2",
"tonic-build",
@@ -2212,9 +2166,9 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.12"
+version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
+checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
dependencies = [
"cfg-if",
"libc",
@@ -2254,9 +2208,9 @@ dependencies = [
[[package]]
name = "half"
-version = "2.4.0"
+version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e"
+checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
dependencies = [
"bytemuck",
"cfg-if",
@@ -2549,7 +2503,7 @@ dependencies = [
[[package]]
name = "influxdb-line-protocol"
version = "1.0.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"bytes",
"log",
@@ -2780,7 +2734,7 @@ dependencies = [
[[package]]
name = "influxdb_influxql_parser"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"chrono",
"chrono-tz",
@@ -2796,7 +2750,7 @@ dependencies = [
[[package]]
name = "influxdb_iox_client"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"arrow-flight",
@@ -2809,7 +2763,7 @@ dependencies = [
"generated_types",
"influxdb-line-protocol",
"iox_query_params",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"reqwest",
"schema",
@@ -2824,7 +2778,7 @@ dependencies = [
[[package]]
name = "ingester_query_grpc"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"base64 0.22.0",
@@ -2836,7 +2790,7 @@ dependencies = [
"pbjson",
"pbjson-build",
"predicate",
- "prost 0.12.3",
+ "prost 0.12.4",
"prost-build",
"query_functions",
"serde",
@@ -2900,7 +2854,7 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02"
[[package]]
name = "iox_catalog"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"backoff 0.1.0",
@@ -2935,7 +2889,7 @@ dependencies = [
[[package]]
name = "iox_http"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"authz",
@@ -2951,7 +2905,7 @@ dependencies = [
[[package]]
name = "iox_query"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"arrow_util",
@@ -2989,7 +2943,7 @@ dependencies = [
[[package]]
name = "iox_query_influxql"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"chrono-tz",
@@ -3022,7 +2976,7 @@ dependencies = [
[[package]]
name = "iox_query_params"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"datafusion",
@@ -3037,7 +2991,7 @@ dependencies = [
[[package]]
name = "iox_time"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"chrono",
"parking_lot",
@@ -3048,7 +3002,7 @@ dependencies = [
[[package]]
name = "ioxd_common"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"authz",
@@ -3141,9 +3095,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "jobserver"
-version = "0.1.28"
+version = "0.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6"
+checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e"
dependencies = [
"libc",
]
@@ -3293,7 +3247,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde_json",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -3322,15 +3276,6 @@ dependencies = [
"tracing",
]
-[[package]]
-name = "lalrpop-util"
-version = "0.20.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553"
-dependencies = [
- "regex-automata 0.4.6",
-]
-
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -3454,14 +3399,11 @@ name = "log"
version = "0.4.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
-dependencies = [
- "value-bag",
-]
[[package]]
name = "logfmt"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"observability_deps",
"tracing-subscriber",
@@ -3531,7 +3473,7 @@ dependencies = [
[[package]]
name = "metric"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"parking_lot",
"workspace-hack",
@@ -3540,7 +3482,7 @@ dependencies = [
[[package]]
name = "metric_exporters"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"metric",
"observability_deps",
@@ -3601,21 +3543,21 @@ dependencies = [
[[package]]
name = "moka"
-version = "0.12.5"
+version = "0.12.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1911e88d5831f748a4097a43862d129e3c6fca831eecac9b8db6d01d93c9de2"
+checksum = "9e0d88686dc561d743b40de8269b26eaf0dc58781bde087b0984646602021d08"
dependencies = [
"async-lock",
"async-trait",
"crossbeam-channel",
"crossbeam-epoch",
"crossbeam-utils",
+ "event-listener 5.3.0",
"futures-util",
"once_cell",
"parking_lot",
"quanta",
"rustc_version",
- "skeptic",
"smallvec",
"tagptr",
"thiserror",
@@ -3636,9 +3578,9 @@ dependencies = [
[[package]]
name = "multimap"
-version = "0.8.3"
+version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
+checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
[[package]]
name = "murmur3"
@@ -3649,7 +3591,7 @@ checksum = "9252111cf132ba0929b6f8e030cac2a24b507f3a4d6db6fb2896f27b354c714b"
[[package]]
name = "mutable_batch"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"arrow_util",
@@ -3665,7 +3607,7 @@ dependencies = [
[[package]]
name = "mutable_batch_lp"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"hashbrown 0.14.3",
"influxdb-line-protocol",
@@ -3678,7 +3620,7 @@ dependencies = [
[[package]]
name = "mutable_batch_pb"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow_util",
"dml",
@@ -3732,12 +3674,6 @@ dependencies = [
"delegate",
]
-[[package]]
-name = "normalize-line-endings"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
-
[[package]]
name = "notify"
version = "6.1.1"
@@ -3778,9 +3714,9 @@ dependencies = [
[[package]]
name = "num"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af"
+checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41"
dependencies = [
"num-bigint",
"num-complex",
@@ -3919,7 +3855,7 @@ dependencies = [
"rand",
"reqwest",
"ring",
- "rustls-pemfile 2.1.1",
+ "rustls-pemfile 2.1.2",
"serde",
"serde_json",
"snafu 0.7.5",
@@ -3932,7 +3868,7 @@ dependencies = [
[[package]]
name = "observability_deps"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"tracing",
"workspace-hack",
@@ -3980,7 +3916,7 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "panic_logging"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"metric",
"observability_deps",
@@ -4054,7 +3990,7 @@ dependencies = [
[[package]]
name = "parquet_cache"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arc-swap",
"async-channel",
@@ -4095,7 +4031,7 @@ dependencies = [
[[package]]
name = "parquet_file"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"base64 0.22.0",
@@ -4110,7 +4046,7 @@ dependencies = [
"observability_deps",
"parquet",
"pbjson-types",
- "prost 0.12.3",
+ "prost 0.12.4",
"schema",
"snafu 0.8.2",
"thiserror",
@@ -4154,8 +4090,8 @@ checksum = "2580e33f2292d34be285c5bc3dba5259542b083cfad6037b6d70345f24dcb735"
dependencies = [
"heck 0.4.1",
"itertools 0.11.0",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
]
[[package]]
@@ -4168,18 +4104,18 @@ dependencies = [
"chrono",
"pbjson",
"pbjson-build",
- "prost 0.12.3",
+ "prost 0.12.4",
"prost-build",
"serde",
]
[[package]]
name = "pem"
-version = "3.0.3"
+version = "3.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310"
+checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae"
dependencies = [
- "base64 0.21.7",
+ "base64 0.22.0",
"serde",
]
@@ -4229,7 +4165,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -4308,7 +4244,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -4371,9 +4307,9 @@ dependencies = [
"nix 0.26.4",
"once_cell",
"parking_lot",
- "prost 0.12.3",
+ "prost 0.12.4",
"prost-build",
- "prost-derive 0.12.3",
+ "prost-derive 0.12.4",
"protobuf",
"sha2",
"smallvec",
@@ -4391,7 +4327,7 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "predicate"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"chrono",
@@ -4415,10 +4351,7 @@ checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8"
dependencies = [
"anstyle",
"difflib",
- "float-cmp",
- "normalize-line-endings",
"predicates-core",
- "regex",
]
[[package]]
@@ -4449,19 +4382,19 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.17"
+version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7"
+checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550"
dependencies = [
"proc-macro2",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
name = "proc-macro2"
-version = "1.0.79"
+version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
+checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba"
dependencies = [
"unicode-ident",
]
@@ -4486,8 +4419,6 @@ version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf"
dependencies = [
- "bit-set",
- "bit-vec",
"bitflags 2.5.0",
"lazy_static",
"num-traits",
@@ -4495,8 +4426,6 @@ dependencies = [
"rand_chacha",
"rand_xorshift",
"regex-syntax 0.8.3",
- "rusty-fork",
- "tempfile",
"unarray",
]
@@ -4512,34 +4441,33 @@ dependencies = [
[[package]]
name = "prost"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
+checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922"
dependencies = [
"bytes",
- "prost-derive 0.12.3",
+ "prost-derive 0.12.4",
]
[[package]]
name = "prost-build"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
+checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1"
dependencies = [
"bytes",
- "heck 0.4.1",
- "itertools 0.11.0",
+ "heck 0.5.0",
+ "itertools 0.12.1",
"log",
"multimap",
"once_cell",
"petgraph",
"prettyplease",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"regex",
- "syn 2.0.58",
+ "syn 2.0.60",
"tempfile",
- "which",
]
[[package]]
@@ -4557,15 +4485,15 @@ dependencies = [
[[package]]
name = "prost-derive"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
+checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48"
dependencies = [
"anyhow",
- "itertools 0.11.0",
+ "itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -4579,11 +4507,11 @@ dependencies = [
[[package]]
name = "prost-types"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
+checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe"
dependencies = [
- "prost 0.12.3",
+ "prost 0.12.4",
]
[[package]]
@@ -4592,17 +4520,6 @@ version = "2.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
-[[package]]
-name = "pulldown-cmark"
-version = "0.9.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b"
-dependencies = [
- "bitflags 2.5.0",
- "memchr",
- "unicase",
-]
-
[[package]]
name = "quanta"
version = "0.12.3"
@@ -4621,7 +4538,7 @@ dependencies = [
[[package]]
name = "query_functions"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"chrono",
@@ -4634,12 +4551,6 @@ dependencies = [
"workspace-hack",
]
-[[package]]
-name = "quick-error"
-version = "1.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
-
[[package]]
name = "quick-xml"
version = "0.26.0"
@@ -4661,9 +4572,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.35"
+version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
+checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
@@ -4894,9 +4805,9 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.32"
+version = "0.38.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89"
+checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f"
dependencies = [
"bitflags 2.5.0",
"errno",
@@ -4907,9 +4818,9 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.21.10"
+version = "0.21.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
+checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4"
dependencies = [
"log",
"ring",
@@ -4940,11 +4851,11 @@ dependencies = [
[[package]]
name = "rustls-pemfile"
-version = "2.1.1"
+version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab"
+checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d"
dependencies = [
- "base64 0.21.7",
+ "base64 0.22.0",
"rustls-pki-types",
]
@@ -4966,21 +4877,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.14"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
-
-[[package]]
-name = "rusty-fork"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f"
-dependencies = [
- "fnv",
- "quick-error",
- "tempfile",
- "wait-timeout",
-]
+checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
[[package]]
name = "ryu"
@@ -5009,7 +4908,7 @@ dependencies = [
[[package]]
name = "schema"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"hashbrown 0.14.3",
@@ -5098,9 +4997,6 @@ name = "semver"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
-dependencies = [
- "serde",
-]
[[package]]
name = "seq-macro"
@@ -5110,9 +5006,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
-version = "1.0.197"
+version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
+checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
dependencies = [
"serde_derive",
]
@@ -5145,13 +5041,13 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.197"
+version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
+checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -5167,9 +5063,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.115"
+version = "1.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
+checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
dependencies = [
"itoa",
"ryu",
@@ -5204,7 +5100,7 @@ dependencies = [
[[package]]
name = "service_common"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"datafusion",
@@ -5216,7 +5112,7 @@ dependencies = [
[[package]]
name = "service_grpc_flight"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"arrow-flight",
@@ -5231,7 +5127,7 @@ dependencies = [
"iox_query_influxql",
"iox_query_params",
"observability_deps",
- "prost 0.12.3",
+ "prost 0.12.4",
"serde",
"serde_json",
"service_common",
@@ -5248,7 +5144,7 @@ dependencies = [
[[package]]
name = "service_grpc_testing"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"generated_types",
"observability_deps",
@@ -5289,9 +5185,9 @@ dependencies = [
[[package]]
name = "signal-hook-registry"
-version = "1.4.1"
+version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
+checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
dependencies = [
"libc",
]
@@ -5324,21 +5220,6 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
-[[package]]
-name = "skeptic"
-version = "0.13.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8"
-dependencies = [
- "bytecount",
- "cargo_metadata",
- "error-chain",
- "glob",
- "pulldown-cmark",
- "tempfile",
- "walkdir",
-]
-
[[package]]
name = "slab"
version = "0.4.9"
@@ -5394,7 +5275,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -5467,7 +5348,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -5529,7 +5410,7 @@ dependencies = [
[[package]]
name = "sqlx-hotswap-pool"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"either",
"futures",
@@ -5723,32 +5604,13 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
-[[package]]
-name = "strum"
-version = "0.25.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
-
[[package]]
name = "strum"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29"
dependencies = [
- "strum_macros 0.26.2",
-]
-
-[[package]]
-name = "strum_macros"
-version = "0.25.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
-dependencies = [
- "heck 0.4.1",
- "proc-macro2",
- "quote",
- "rustversion",
- "syn 2.0.58",
+ "strum_macros",
]
[[package]]
@@ -5761,7 +5623,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -5806,9 +5668,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.58"
+version = "2.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687"
+checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3"
dependencies = [
"proc-macro2",
"quote",
@@ -5823,9 +5685,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "sysinfo"
-version = "0.30.8"
+version = "0.30.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b1a378e48fb3ce3a5cf04359c456c9c98ff689bcf1c1bc6e6a31f247686f275"
+checksum = "87341a165d73787554941cd5ef55ad728011566fe714e987d1b976c15dbc3a83"
dependencies = [
"cfg-if",
"core-foundation-sys",
@@ -5884,7 +5746,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "test_helpers"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"dotenvy",
@@ -5900,7 +5762,7 @@ dependencies = [
[[package]]
name = "test_helpers_end_to_end"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"arrow",
"arrow-flight",
@@ -5925,7 +5787,7 @@ dependencies = [
"observability_deps",
"once_cell",
"parking_lot",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"regex",
"reqwest",
@@ -5942,22 +5804,22 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "1.0.58"
+version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
+checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.58"
+version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
+checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -6075,7 +5937,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -6107,7 +5969,6 @@ checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15"
dependencies = [
"bytes",
"futures-core",
- "futures-io",
"futures-sink",
"pin-project-lite",
"slab",
@@ -6118,7 +5979,7 @@ dependencies = [
[[package]]
name = "tokio_metrics_bridge"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"metric",
"parking_lot",
@@ -6129,7 +5990,7 @@ dependencies = [
[[package]]
name = "tokio_watchdog"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"metric",
"observability_deps",
@@ -6183,7 +6044,7 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
- "prost 0.12.3",
+ "prost 0.12.4",
"rustls",
"rustls-native-certs",
"rustls-pemfile 1.0.4",
@@ -6206,7 +6067,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -6216,7 +6077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f80db390246dfb46553481f6024f0082ba00178ea495dbb99e70ba9a4fafb5e1"
dependencies = [
"async-stream",
- "prost 0.12.3",
+ "prost 0.12.4",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -6228,8 +6089,8 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fa37c513df1339d197f4ba21d28c918b9ef1ac1768265f11ecb6b7f1cba1b76"
dependencies = [
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -6291,7 +6152,7 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
[[package]]
name = "tower_trailer"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"futures",
"http",
@@ -6305,7 +6166,7 @@ dependencies = [
[[package]]
name = "trace"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"chrono",
"observability_deps",
@@ -6317,7 +6178,7 @@ dependencies = [
[[package]]
name = "trace_exporters"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"async-trait",
"clap",
@@ -6334,7 +6195,7 @@ dependencies = [
[[package]]
name = "trace_http"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"bytes",
"futures",
@@ -6372,7 +6233,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
@@ -6431,7 +6292,7 @@ dependencies = [
[[package]]
name = "tracker"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"futures",
"hashbrown 0.14.3",
@@ -6466,7 +6327,7 @@ checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3"
[[package]]
name = "trogging"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"clap",
"logfmt",
@@ -6489,7 +6350,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
dependencies = [
"cfg-if",
- "rand",
"static_assertions",
]
@@ -6511,15 +6371,6 @@ version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
-[[package]]
-name = "unicase"
-version = "2.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
-dependencies = [
- "version_check",
-]
-
[[package]]
name = "unicode-bidi"
version = "0.3.15"
@@ -6615,12 +6466,6 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
-[[package]]
-name = "value-bag"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8"
-
[[package]]
name = "vcpkg"
version = "0.2.15"
@@ -6694,7 +6539,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
"wasm-bindgen-shared",
]
@@ -6728,7 +6573,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -6768,18 +6613,6 @@ version = "0.25.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
-[[package]]
-name = "which"
-version = "4.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7"
-dependencies = [
- "either",
- "home",
- "once_cell",
- "rustix",
-]
-
[[package]]
name = "whoami"
version = "1.5.1"
@@ -6808,11 +6641,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
-version = "0.1.6"
+version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
+checksum = "134306a13c5647ad6453e8deaec55d3a44d6021970129e6188735e74bf546697"
dependencies = [
- "winapi",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -6828,7 +6661,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
"windows-core",
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -6837,7 +6670,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -6855,7 +6688,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -6875,17 +6708,18 @@ dependencies = [
[[package]]
name = "windows-targets"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
+checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
- "windows_aarch64_gnullvm 0.52.4",
- "windows_aarch64_msvc 0.52.4",
- "windows_i686_gnu 0.52.4",
- "windows_i686_msvc 0.52.4",
- "windows_x86_64_gnu 0.52.4",
- "windows_x86_64_gnullvm 0.52.4",
- "windows_x86_64_msvc 0.52.4",
+ "windows_aarch64_gnullvm 0.52.5",
+ "windows_aarch64_msvc 0.52.5",
+ "windows_i686_gnu 0.52.5",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.5",
+ "windows_x86_64_gnu 0.52.5",
+ "windows_x86_64_gnullvm 0.52.5",
+ "windows_x86_64_msvc 0.52.5",
]
[[package]]
@@ -6896,9 +6730,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
+checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
[[package]]
name = "windows_aarch64_msvc"
@@ -6908,9 +6742,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
+checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
[[package]]
name = "windows_i686_gnu"
@@ -6920,9 +6754,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
+checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
[[package]]
name = "windows_i686_msvc"
@@ -6932,9 +6772,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
+checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
[[package]]
name = "windows_x86_64_gnu"
@@ -6944,9 +6784,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
+checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
[[package]]
name = "windows_x86_64_gnullvm"
@@ -6956,9 +6796,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
+checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
[[package]]
name = "windows_x86_64_msvc"
@@ -6968,9 +6808,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
-version = "0.52.4"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
+checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
[[package]]
name = "winreg"
@@ -6985,22 +6825,17 @@ dependencies = [
[[package]]
name = "workspace-hack"
version = "0.1.0"
-source = "git+https://github.com/influxdata/influxdb3_core?rev=1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c#1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"
+source = "git+https://github.com/influxdata/influxdb3_core?rev=b546e7f86ee9adbff0dd3c5e687140848397604a#b546e7f86ee9adbff0dd3c5e687140848397604a"
dependencies = [
"ahash",
"arrow-array",
"arrow-ipc",
"base64 0.21.7",
- "bit-set",
- "bit-vec",
"bitflags 2.5.0",
"byteorder",
"bytes",
"cc",
"chrono",
- "clap",
- "clap_builder",
- "concurrent-queue",
"crossbeam-epoch",
"crossbeam-utils",
"crypto-common",
@@ -7024,15 +6859,12 @@ dependencies = [
"itertools 0.11.0",
"k8s-openapi",
"kube-core",
- "lalrpop-util",
"libc",
- "linux-raw-sys",
"lock_api",
"log",
"md-5",
"memchr",
"mio",
- "nix 0.28.0",
"nom",
"num-traits",
"object_store",
@@ -7041,10 +6873,9 @@ dependencies = [
"percent-encoding",
"petgraph",
"phf_shared",
- "predicates",
"proptest",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"rand",
"rand_core",
"regex",
@@ -7052,14 +6883,11 @@ dependencies = [
"regex-syntax 0.8.3",
"reqwest",
"ring",
- "rustix",
"rustls",
"serde",
"serde_json",
"sha2",
"similar",
- "smallvec",
- "socket2",
"spin 0.9.8",
"sqlparser",
"sqlx",
@@ -7069,7 +6897,7 @@ dependencies = [
"sqlx-postgres",
"sqlx-sqlite",
"syn 1.0.109",
- "syn 2.0.58",
+ "syn 2.0.60",
"thrift",
"tokio",
"tokio-stream",
@@ -7080,7 +6908,6 @@ dependencies = [
"tracing-core",
"tracing-log",
"tracing-subscriber",
- "twox-hash",
"unicode-bidi",
"unicode-normalization",
"url",
@@ -7128,7 +6955,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.58",
+ "syn 2.0.60",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index f42e5064d7..820056ccd1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -101,37 +101,37 @@ urlencoding = "1.1"
uuid = { version = "1", features = ["v4"] }
# Core.git crates we depend on
-arrow_util = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c"}
-authz = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c", features = ["http"] }
-clap_blocks = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-data_types = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-datafusion_util = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-influxdb-line-protocol = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-influxdb_influxql_parser = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-influxdb_iox_client = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-iox_catalog = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-ioxd_common = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-iox_http = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-iox_query = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-iox_query_params = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-iox_query_influxql = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-iox_time = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-metric = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-metric_exporters = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-observability_deps = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-panic_logging = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-parquet_file = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-schema = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-service_common = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-service_grpc_flight = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-test_helpers = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-test_helpers_end_to_end = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-tokio_metrics_bridge = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-trace = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-trace_exporters = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-trace_http = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-tracker = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c" }
-trogging = { git = "https://github.com/influxdata/influxdb3_core", rev = "1d19543c8ef1fe9b3401f703cdcaba4d20db4e8c", default-features = true, features = ["clap"] }
+arrow_util = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a"}
+authz = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a", features = ["http"] }
+clap_blocks = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+data_types = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+datafusion_util = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+influxdb-line-protocol = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+influxdb_influxql_parser = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+influxdb_iox_client = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+iox_catalog = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+ioxd_common = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+iox_http = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+iox_query = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+iox_query_params = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+iox_query_influxql = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+iox_time = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+metric = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+metric_exporters = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+observability_deps = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+panic_logging = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+parquet_file = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+schema = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+service_common = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+service_grpc_flight = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+test_helpers = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+test_helpers_end_to_end = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+tokio_metrics_bridge = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+trace = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+trace_exporters = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+trace_http = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+tracker = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a" }
+trogging = { git = "https://github.com/influxdata/influxdb3_core", rev = "b546e7f86ee9adbff0dd3c5e687140848397604a", default-features = true, features = ["clap"] }
[workspace.lints.rust]
rust_2018_idioms = "deny"
|
4e2b68a7c5e4a1c3495e3a3d2907f6bf6fa33476
|
Carol (Nichols || Goulding)
|
2022-11-16 16:36:39
|
Simplify test by not actually creating a catalog namespace
|
This isn't actually needed for what this test is testing.
| null |
fix: Simplify test by not actually creating a catalog namespace
This isn't actually needed for what this test is testing.
|
diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs
index 90d816d9ba..fedff3421a 100644
--- a/ingester/src/data/namespace.rs
+++ b/ingester/src/data/namespace.rs
@@ -328,8 +328,8 @@ mod tests {
};
use assert_matches::assert_matches;
use data_types::{
- ColumnId, ColumnSet, CompactionLevel, ParquetFileParams, PartitionId,
- PartitionKey, ShardIndex, Timestamp,
+ ColumnId, ColumnSet, CompactionLevel, ParquetFileParams, PartitionId, PartitionKey,
+ ShardIndex, Timestamp,
};
use iox_catalog::{interface::Catalog, mem::MemCatalog};
use iox_time::SystemProvider;
@@ -424,18 +424,11 @@ mod tests {
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
let mut repos = catalog.repositories().await;
- let topic = repos.topics().create_or_get("whatevs").await.unwrap();
- let query_pool = repos.query_pools().create_or_get("whatevs").await.unwrap();
- let namespace = repos
- .namespaces()
- .create("foo", topic.id, query_pool.id)
- .await
- .unwrap();
let w1 = make_write_op(
&PartitionKey::from("1970-01-01"),
SHARD_INDEX,
- namespace.id,
+ NAMESPACE_ID,
TABLE_NAME,
TABLE_ID,
1,
@@ -444,7 +437,7 @@ mod tests {
let w2 = make_write_op(
&PartitionKey::from("1970-01-01"),
SHARD_INDEX,
- namespace.id,
+ NAMESPACE_ID,
TABLE_NAME,
TABLE_ID,
2,
@@ -470,7 +463,7 @@ mod tests {
let parquet_file_params = ParquetFileParams {
shard_id: SHARD_ID,
- namespace_id: namespace.id,
+ namespace_id: NAMESPACE_ID,
table_id: TABLE_ID,
partition_id: partition.id,
object_store_id: Uuid::new_v4(),
@@ -521,7 +514,7 @@ mod tests {
let partition_provider = Arc::new(CatalogPartitionResolver::new(Arc::clone(&catalog)));
let data = NamespaceData::new(
- namespace.id,
+ NAMESPACE_ID,
DeferredLoad::new(Duration::from_millis(1), async { "foo".into() }),
Arc::new(MockTableNameProvider::new(TABLE_NAME)),
SHARD_ID,
|
66a6e8e929ccc7971e81839c203d85f013cfa94d
|
Dom Dwyer
|
2022-11-08 14:30:25
|
cross-thread hashmap entry visibility
|
At the time of this commit, this test fails. Performing a get() on a key
previously inserted by another thread should not fail.
| null |
test: cross-thread hashmap entry visibility
At the time of this commit, this test fails. Performing a get() on a key
previously inserted by another thread should not fail.
|
diff --git a/ingester/src/arcmap.rs b/ingester/src/arcmap.rs
index 7548f6c4c8..84a5eec454 100644
--- a/ingester/src/arcmap.rs
+++ b/ingester/src/arcmap.rs
@@ -316,6 +316,31 @@ mod tests {
assert_eq!(init_count.load(Ordering::SeqCst), 1); // Number of init() calls
}
+ #[test]
+ fn test_cross_thread_visibility() {
+ let refs = Arc::new(ArcMap::default());
+
+ const N_THREADS: i64 = 10;
+
+ let handles = (0..N_THREADS)
+ .map(|i| {
+ let refs = Arc::clone(&refs);
+ std::thread::spawn(move || {
+ refs.insert(&i, Arc::new(i));
+ })
+ })
+ .collect::<Vec<_>>();
+
+ for h in handles {
+ h.join().unwrap();
+ }
+
+ for i in 0..N_THREADS {
+ let v = refs.get(&i).unwrap();
+ assert_eq!(i, *v);
+ }
+ }
+
// Assert values can be "moved" due to FnOnce being used, vs. Fn.
//
// This is a compile-time assertion more than a runtime test.
|
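The visibility property exercised by the test added in the diff above can be sketched with plain standard-library types. This is a minimal illustration only: `ArcMap` is internal to the ingester, so a `Mutex<HashMap<..>>` stands in for it here, and the thread count mirrors the `N_THREADS` constant from the diff.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Minimal illustration of the property under test: a value inserted by one
// thread must be observable by another thread after the writer has joined.
fn main() {
    let map: Arc<Mutex<HashMap<i64, i64>>> = Arc::new(Mutex::new(HashMap::new()));

    const N_THREADS: i64 = 10;

    let handles: Vec<_> = (0..N_THREADS)
        .map(|i| {
            let map = Arc::clone(&map);
            std::thread::spawn(move || {
                // Each worker inserts its own key.
                map.lock().unwrap().insert(i, i);
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }

    // Every key inserted by the worker threads must be visible here.
    let guard = map.lock().unwrap();
    for i in 0..N_THREADS {
        assert_eq!(guard.get(&i), Some(&i));
    }
}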
35bd5ccd135dce12a1597644c532e0a37aa2355d
|
Andrew Lamb
|
2023-03-15 12:35:15
|
Update datafusion pin (#7207)
|
* chore: Update datafusion pin
* chore: Run cargo hakari tasks
---------
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update datafusion pin (#7207)
* chore: Update datafusion pin
* chore: Run cargo hakari tasks
---------
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 6b02bc6fce..a0a661f7db 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1407,8 +1407,8 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1437,7 +1437,6 @@ dependencies = [
"object_store",
"parking_lot 0.12.1",
"parquet",
- "paste",
"percent-encoding",
"pin-project-lite",
"rand",
@@ -1455,8 +1454,8 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"arrow",
"chrono",
@@ -1468,8 +1467,8 @@ dependencies = [
[[package]]
name = "datafusion-execution"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"dashmap",
"datafusion-common",
@@ -1485,20 +1484,19 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"ahash 0.8.3",
"arrow",
"datafusion-common",
- "log",
"sqlparser",
]
[[package]]
name = "datafusion-optimizer"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"arrow",
"async-trait",
@@ -1514,8 +1512,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1533,7 +1531,6 @@ dependencies = [
"itertools",
"lazy_static",
"md-5",
- "num-traits",
"paste",
"petgraph",
"rand",
@@ -1545,8 +1542,8 @@ dependencies = [
[[package]]
name = "datafusion-proto"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"arrow",
"chrono",
@@ -1554,7 +1551,6 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"object_store",
- "parking_lot 0.12.1",
"pbjson-build",
"prost",
"prost-build",
@@ -1562,8 +1558,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"arrow",
"datafusion-common",
@@ -1573,8 +1569,8 @@ dependencies = [
[[package]]
name = "datafusion-sql"
-version = "19.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=8c34ca4fa34787b137b48ce4f6ffd41b64a1a633#8c34ca4fa34787b137b48ce4f6ffd41b64a1a633"
+version = "20.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=612eb1d0ce338af7980fa906df8796eb47c4be44#612eb1d0ce338af7980fa906df8796eb47c4be44"
dependencies = [
"arrow-schema",
"datafusion-common",
@@ -6690,6 +6686,8 @@ dependencies = [
"crossbeam-utils",
"crypto-common",
"datafusion",
+ "datafusion-optimizer",
+ "datafusion-physical-expr",
"digest",
"either",
"fixedbitset",
diff --git a/Cargo.toml b/Cargo.toml
index f55e7f9e09..93e61af688 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -118,8 +118,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "34.0.0" }
arrow-flight = { version = "34.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="8c34ca4fa34787b137b48ce4f6ffd41b64a1a633", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="8c34ca4fa34787b137b48ce4f6ffd41b64a1a633" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="612eb1d0ce338af7980fa906df8796eb47c4be44", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="612eb1d0ce338af7980fa906df8796eb47c4be44" }
hashbrown = { version = "0.13.2" }
parquet = { version = "34.0.0" }
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 44dc960d69..41b653fc9f 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -29,7 +29,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "8c34ca4fa34787b137b48ce4f6ffd41b64a1a633" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "612eb1d0ce338af7980fa906df8796eb47c4be44" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "612eb1d0ce338af7980fa906df8796eb47c4be44", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "612eb1d0ce338af7980fa906df8796eb47c4be44", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1" }
fixedbitset = { version = "0.4" }
|
c4b04a16c5f737a261788866ad6b655ca0f62eff
|
Dom Dwyer
|
2023-01-24 12:03:28
|
rename last_probe instant
|
last_probe was "the instant at which the last set of probes started
being sent" in my head, but Carol saw it as "first_probe - the time at
which probes started being sent".
Hopefully probe_window_started_at is less ambiguous.
| null |
refactor: rename last_probe instant
last_probe was "the instant at which the last set of probes started
being sent" in my head, but Carol saw it as "first_probe - the time at
which probes started being sent".
Hopefully probe_window_started_at is less ambiguous.
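As an aside to the rename, here is a minimal, self-contained sketch (hypothetical `should_probe` helper and constants, not the actual router code) of how `probe_window_started_at` is meant to read: it marks when the current probe window began, and the counter resets once `PROBE_INTERVAL` has elapsed since that start.

```rust
use std::time::{Duration, Instant};

const PROBE_INTERVAL: Duration = Duration::from_secs(1);
const NUM_PROBES: u64 = 10;

/// Simplified stand-in for the router's probe bookkeeping.
#[derive(Debug, Default)]
struct ProbeState {
    /// Instant at which the current probe window started.
    probe_window_started_at: Option<Instant>,
    /// Probes sent so far in this window.
    probes_started: u64,
}

impl ProbeState {
    /// Returns true if a probe may be sent now, resetting the window
    /// once `PROBE_INTERVAL` has elapsed since it started.
    fn should_probe(&mut self, now: Instant) -> bool {
        match self.probe_window_started_at {
            // Window expired: start a new one and reset the counter.
            Some(started) if now.duration_since(started) > PROBE_INTERVAL => {
                self.probe_window_started_at = Some(now);
                self.probes_started = 0;
            }
            // First probe ever: open the first window.
            None => self.probe_window_started_at = Some(now),
            // Still inside the current window: keep counting.
            _ => {}
        }
        if self.probes_started < NUM_PROBES {
            self.probes_started += 1;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut state = ProbeState::default();
    assert!(state.should_probe(Instant::now()));
}
```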
|
diff --git a/router/src/dml_handlers/rpc_write/circuit_breaker.rs b/router/src/dml_handlers/rpc_write/circuit_breaker.rs
index dd456caf57..59746d596d 100644
--- a/router/src/dml_handlers/rpc_write/circuit_breaker.rs
+++ b/router/src/dml_handlers/rpc_write/circuit_breaker.rs
@@ -164,7 +164,12 @@ pub(crate) struct CircuitBreaker {
#[derive(Debug, Default)]
struct ProbeState {
- last_probe: Option<Instant>,
+ /// The instant at which this set of probes started to be sent.
+ ///
+ /// Up to [`NUM_PROBES`] SHOULD be sent in the time range between this
+ /// timestamp plus [`PROBE_INTERVAL`].
+ probe_window_started_at: Option<Instant>,
+ /// The number of probes sent so far in this [`PROBE_INTERVAL`].
probes_started: u64,
}
@@ -258,7 +263,7 @@ impl CircuitBreaker {
let now = Instant::now();
// Reset the probe count once per PROBE_INTERVAL.
- match guard.last_probe {
+ match guard.probe_window_started_at {
// It is time to begin probing again.
Some(p) if now.duration_since(p) > PROBE_INTERVAL => {
debug!("remote unavailable, probing");
@@ -270,7 +275,7 @@ impl CircuitBreaker {
// `guard.probes_started` if it has reached `NUM_PROBES`.
assert!(guard.probes_started <= NUM_PROBES);
// Record the start of a probing interval.
- guard.last_probe = Some(now);
+ guard.probe_window_started_at = Some(now);
// Reset the number of probes allowed.
guard.probes_started = 0;
@@ -295,7 +300,7 @@ impl CircuitBreaker {
None => {
// First time this circuit breaker has entered the probing
// state; no start of a probe interval to check.
- guard.last_probe = Some(now);
+ guard.probe_window_started_at = Some(now);
// It should be impossible to have started probes if we've never
// been in the probing state before.
assert_eq!(guard.probes_started, 0);
@@ -495,7 +500,7 @@ mod tests {
assert_reset_is_nop(&c.requests);
// Pretend it is time to probe again.
- c.probes.lock().last_probe = Some(
+ c.probes.lock().probe_window_started_at = Some(
Instant::now()
.checked_sub(PROBE_INTERVAL + Duration::from_nanos(1))
.expect("instant cannot roll back far enough - test issue, not code issue"),
@@ -565,7 +570,7 @@ mod tests {
assert_reset_is_nop(&c.requests);
// Pretend it is time to probe again.
- c.probes.lock().last_probe = Some(
+ c.probes.lock().probe_window_started_at = Some(
Instant::now()
.checked_sub(PROBE_INTERVAL + Duration::from_nanos(1))
.expect("instant cannot roll back far enough - test issue, not code issue"),
|
012df69974564261a140f15d875dd62763d2a3f7
|
Marco Neumann
|
2023-09-18 10:58:56
|
i->q V2 circuit breaker (#8743)
|
* feat: impl `PartialEq + Eq` for `TestError`
* feat: i->q V2 circuit breaker
This is a straight port from V1; it even uses the same test. The code is
copied though (instead of reusing the old one) because the interface in
the V2 client is so different and the new testing infra is also nicer
(IMHO).
For #8349.
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: i->q V2 circuit breaker (#8743)
* feat: impl `PartialEq + Eq` for `TestError`
* feat: i->q V2 circuit breaker
This is a straight port from V1; it even uses the same test. The code is
copied though (instead of reusing the old one) because the interface in
the V2 client is so different and the new testing infra is also nicer
(IMHO).
For #8349.
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
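Before the full diff, a minimal sketch of the state machine this commit describes (deliberately simplified: no generation counter, no backoff, no metrics, and the constants are hypothetical). A request outcome either resets the error count, accumulates errors until the circuit opens, or closes the circuit again after a successful test request:

```rust
use std::time::{Duration, Instant};

/// Simplified circuit states; the real layer also tracks a generation
/// counter and backoff state (see the diff below).
#[derive(Debug)]
enum CircuitState {
    Closed { error_count: u64 },
    Open { until: Instant },
    HalfOpen,
}

const OPEN_AFTER_N_ERRORS: u64 = 2;
const OPEN_FOR: Duration = Duration::from_secs(1);

impl CircuitState {
    /// Feed the outcome of one request into the state machine.
    fn on_result(&mut self, now: Instant, ok: bool) {
        *self = match (std::mem::replace(self, CircuitState::HalfOpen), ok) {
            // A successful request closes (or keeps closed) the circuit.
            (_, true) => CircuitState::Closed { error_count: 0 },
            // Errors accumulate until the circuit opens.
            (CircuitState::Closed { error_count }, false) => {
                if error_count + 1 >= OPEN_AFTER_N_ERRORS {
                    CircuitState::Open { until: now + OPEN_FOR }
                } else {
                    CircuitState::Closed { error_count: error_count + 1 }
                }
            }
            // A failed test request (or a late failure) re-opens the circuit.
            (CircuitState::HalfOpen, false) | (CircuitState::Open { .. }, false) => {
                CircuitState::Open { until: now + OPEN_FOR }
            }
        }
    }
}

fn main() {
    let mut s = CircuitState::Closed { error_count: 0 };
    let now = Instant::now();
    s.on_result(now, false);
    s.on_result(now, false);
    if let CircuitState::Open { until } = s {
        assert!(until > now);
    } else {
        panic!("circuit should be open after two errors");
    }
}
```

In the real layer the `Open { until }` deadline comes from the backoff state and the half-open test request is guarded so only one probe runs at a time; this sketch only shows the closed/open/half-open transitions.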
|
diff --git a/Cargo.lock b/Cargo.lock
index 9dadc829be..6c1c1439b1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2767,6 +2767,8 @@ dependencies = [
"iox_time",
"metric",
"observability_deps",
+ "pin-project",
+ "rand",
"snafu",
"test_helpers",
"tokio",
diff --git a/ingester_query_client/Cargo.toml b/ingester_query_client/Cargo.toml
index ab7ed730ce..76ee088b66 100644
--- a/ingester_query_client/Cargo.toml
+++ b/ingester_query_client/Cargo.toml
@@ -18,6 +18,8 @@ ingester_query_grpc = { path = "../ingester_query_grpc" }
iox_time = { path = "../iox_time" }
metric = { path = "../metric" }
observability_deps = { path = "../observability_deps" }
+pin-project = "1.1"
+rand = "0.8.3"
snafu = "0.7"
tokio = { version = "1.32" }
tonic = { workspace = true }
diff --git a/ingester_query_client/src/error_classifier.rs b/ingester_query_client/src/error_classifier.rs
index d8552ce40d..b950786616 100644
--- a/ingester_query_client/src/error_classifier.rs
+++ b/ingester_query_client/src/error_classifier.rs
@@ -47,7 +47,7 @@ pub fn is_upstream_error(e: &DynError) -> bool {
}
/// Simple error for testing purposes that controles [`test_error_classifier`].
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq)]
#[allow(missing_copy_implementations)]
pub struct TestError {
retry: bool,
diff --git a/ingester_query_client/src/layers/circuit_breaker.rs b/ingester_query_client/src/layers/circuit_breaker.rs
new file mode 100644
index 0000000000..21653a6316
--- /dev/null
+++ b/ingester_query_client/src/layers/circuit_breaker.rs
@@ -0,0 +1,1163 @@
+//! Circuit breaker.
+
+use std::{
+ borrow::Cow,
+ future::Future,
+ ops::DerefMut,
+ pin::Pin,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, Mutex,
+ },
+ task::{Context, Poll},
+};
+
+use async_trait::async_trait;
+use backoff::{Backoff, BackoffConfig};
+use iox_time::{Time, TimeProvider};
+use metric::{Metric, Registry, U64Gauge};
+use observability_deps::tracing::{info, warn};
+use pin_project::{pin_project, pinned_drop};
+use rand::rngs::mock::StepRng;
+use snafu::Snafu;
+
+use crate::{
+ error::DynError,
+ error_classifier::{is_upstream_error, ErrorClassifier},
+ layer::{Layer, QueryResponse},
+};
+
+#[derive(Debug, Snafu)]
+#[allow(missing_docs)]
+pub enum Error {
+ #[snafu(display("ingester circuit broken / open: {addr}"))]
+ CircuitBroken { addr: Arc<str> },
+}
+
+/// Wrapper around a [`Future`] that signals if the future was cancelled or not.
+#[pin_project(PinnedDrop)]
+struct TrackedFuture<F> {
+ #[pin]
+ inner: F,
+ done: bool,
+ /// If false, the future was dropped before resolving to `ready`
+ not_cancelled: Arc<AtomicBool>,
+}
+
+impl<F> TrackedFuture<F> {
+ /// Create new tracked future.
+ ///
+ /// # Panic
+ /// The `not_cancelled` MUST be set to `true`.
+ fn new(f: F, not_cancelled: Arc<AtomicBool>) -> Self {
+ assert!(not_cancelled.load(Ordering::SeqCst));
+
+ Self {
+ inner: f,
+ done: false,
+ not_cancelled,
+ }
+ }
+}
+
+impl<F> Future for TrackedFuture<F>
+where
+ F: Future,
+{
+ type Output = F::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = self.project();
+
+ assert!(!*this.done);
+ let res = this.inner.poll(cx);
+ *this.done = res.is_ready();
+ res
+ }
+}
+
+#[pinned_drop]
+impl<F> PinnedDrop for TrackedFuture<F> {
+ fn drop(self: Pin<&mut Self>) {
+ let this = self.project();
+
+ if !*this.done {
+ this.not_cancelled.store(false, Ordering::SeqCst);
+ }
+ }
+}
+
+/// Metrics for a specific circuit.
+#[derive(Debug, Clone)]
+struct CircuitMetrics {
+ open: U64Gauge,
+ closed: U64Gauge,
+ half_open: U64Gauge,
+}
+
+impl CircuitMetrics {
+ fn new(metric_registry: &Registry, addr: &str) -> Self {
+ let circuit_state: Metric<U64Gauge> = metric_registry.register_metric(
+ "ingester_circuit_state",
+ "state of the given ingestger connection",
+ );
+
+ Self {
+ open: circuit_state.recorder([
+ ("ingester", Cow::Owned(addr.to_owned())),
+ ("state", Cow::from("open")),
+ ]),
+ closed: circuit_state.recorder([
+ ("ingester", Cow::Owned(addr.to_owned())),
+ ("state", Cow::from("closed")),
+ ]),
+ half_open: circuit_state.recorder([
+ ("ingester", Cow::Owned(addr.to_owned())),
+ ("state", Cow::from("half_open")),
+ ]),
+ }
+ }
+
+ /// Set state to [open](CircuitState::Open).
+ fn set_open(&self) {
+ self.open.set(1);
+ self.closed.set(0);
+ self.half_open.set(0);
+ }
+
+ /// Set state to [closed](CircuitState::Closed).
+ fn set_closed(&self) {
+ self.open.set(0);
+ self.closed.set(1);
+ self.half_open.set(0);
+ }
+
+ /// Set state to [half open](CircuitState::HalfOpen).
+ fn set_half_open(&self) {
+ self.open.set(0);
+ self.closed.set(0);
+ self.half_open.set(1);
+ }
+}
+
+/// Current circuit state of a specific connection.
+///
+/// # State Machine
+///
+/// ```text
+/// o------(ok)----<request>---(err)--------------------o
+/// | | |
+/// V | V
+/// +------------+ +-------------+ +--------+
+/// | | | | o-(no)->| |
+/// | | | | | | |
+/// <start>-->| Closed | | Half Open |<--(yes)--<waited?>--| Open |
+/// | | | | | |
+/// | | | | | |
+/// +------------+ +-------------+ +--------+
+/// ^ | ^
+/// | | |
+/// | <request>--(err)---<too many errors?>--(yes)----------o
+/// | | |
+/// | (ok) (no)
+/// | | |
+/// | [reset err counter] |
+/// | | |
+/// o-----o-----------------o
+/// ```
+///
+/// # Generation Counter
+/// The circuit state carries a generation counter so because we can have multiple concurrent requests for the same
+/// connection and a response should only be able to change a circuit state if the the state hasn't changed while the
+/// request was running. Otherwise the reasoning about the state machine will get pretty nasty.
+#[derive(Debug)]
+enum CircuitState {
+ /// Circuit is closed, connection is used.
+ Closed {
+ /// Number of errors on this connection.
+ error_count: u64,
+ },
+
+ /// Circuit is open, no connection will be used.
+ Open {
+ /// How long this open state will last.
+ until: Time,
+
+ /// Backoff state to generate the next `until` value if the trial during [half open](Self::HalfOpen) fails.
+ backoff: Option<Backoff>,
+ },
+
+ /// Circuit is half-open, we will try if the connection is usable again.
+ HalfOpen {
+ /// Backoff state in case the trial fails and we need to go back into the [open](Self::Open) state.
+ backoff: Option<Backoff>,
+
+ /// Signal that is set to `true` if we already have a test request running.
+ has_test_running: Arc<AtomicBool>,
+ },
+}
+
+#[derive(Debug)]
+struct Circuit {
+ /// Current state.
+ state: CircuitState,
+
+ /// Change counter.
+ gen: u64,
+}
+
+/// Wrapper around [`Layer`] that implements the [Circuit Breaker Design Pattern].
+///
+///
+/// [Circuit Breaker Design Pattern]: https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern
+#[derive(Debug)]
+pub struct CircuitBreakerLayer<L>
+where
+ L: Layer,
+{
+ /// The underlying client.
+ inner: Arc<L>,
+
+ /// Ingester addr.
+ addr: Arc<str>,
+
+ /// After how many consecutive errors shall we open a circuit?
+ open_circuit_after_n_errors: u64,
+
+ /// Time provider.
+ time_provider: Arc<dyn TimeProvider>,
+
+ /// Backoff config.
+ backoff_config: BackoffConfig,
+
+ /// Metrics.
+ metrics: CircuitMetrics,
+
+ /// Detect if a given error should trigger the circuit breaker.
+ should_break: ErrorClassifier,
+
+ /// Current circuit states, keyed by ingester address
+ circuit: Mutex<Circuit>,
+
+ /// Overwrite for the backoff RNG, used for testing.
+ rng_overwrite: Option<StepRng>,
+}
+
+impl<L> CircuitBreakerLayer<L>
+where
+ L: Layer,
+{
+ /// Create new circuit breaker wrapper.
+ ///
+ /// Use `open_circuit_after_n_errors` to determine after how many consecutive errors we shall open a circuit.
+ pub fn new(
+ inner: L,
+ addr: Arc<str>,
+ time_provider: Arc<dyn TimeProvider>,
+ metric_registry: &Registry,
+ open_circuit_after_n_errors: u64,
+ backoff_config: BackoffConfig,
+ ) -> Self {
+ Self::new_with_classifier(
+ inner,
+ addr,
+ time_provider,
+ metric_registry,
+ open_circuit_after_n_errors,
+ backoff_config,
+ ErrorClassifier::new(is_upstream_error),
+ )
+ }
+
+ fn new_with_classifier(
+ inner: L,
+ addr: Arc<str>,
+ time_provider: Arc<dyn TimeProvider>,
+ metric_registry: &Registry,
+ open_circuit_after_n_errors: u64,
+ backoff_config: BackoffConfig,
+ should_break: ErrorClassifier,
+ ) -> Self {
+ let metrics = CircuitMetrics::new(metric_registry, &addr);
+ metrics.set_closed();
+ Self {
+ inner: Arc::new(inner),
+ addr,
+ open_circuit_after_n_errors,
+ time_provider,
+ backoff_config,
+ metrics,
+ should_break,
+ circuit: Mutex::new(Circuit {
+ state: CircuitState::Closed { error_count: 0 },
+ gen: 0,
+ }),
+ rng_overwrite: None,
+ }
+ }
+}
+
+#[async_trait]
+impl<L> Layer for CircuitBreakerLayer<L>
+where
+ L: Layer,
+{
+ type Request = L::Request;
+ type ResponseMetadata = L::ResponseMetadata;
+ type ResponsePayload = L::ResponsePayload;
+
+ async fn query(
+ &self,
+ request: Self::Request,
+ ) -> Result<QueryResponse<Self::ResponseMetadata, Self::ResponsePayload>, DynError> {
+ let (test_signal, start_gen) = {
+ let mut circuit = self.circuit.lock().expect("not poisoned");
+
+ // Open => HalfOpen transition
+ if let CircuitState::Open { until, backoff } = &mut circuit.state {
+ let now = self.time_provider.now();
+ if *until <= now {
+ let backoff = backoff.take().expect("not moved");
+ self.metrics.set_half_open();
+ circuit.state = CircuitState::HalfOpen {
+ backoff: Some(backoff),
+ has_test_running: Arc::new(AtomicBool::new(false)),
+ };
+ circuit.gen += 1;
+ }
+ }
+
+ let (open, test_signal) = match &circuit.state {
+ CircuitState::Open { .. } => {
+ warn!(
+ addr = self.addr.as_ref(),
+ "Circuit open, not contacting ingester",
+ );
+ (true, None)
+ }
+ CircuitState::HalfOpen {
+ has_test_running, ..
+ } => {
+ let this_is_this_test = has_test_running
+ .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok();
+
+ if this_is_this_test {
+ info!(
+ addr = self.addr.as_ref(),
+ "Circuit half-open and this is a test request",
+ );
+ } else {
+ info!(
+ addr = self.addr.as_ref(),
+ "Circuit half-open but a test request is already running, not contacting ingester",
+ );
+ }
+
+ (
+ !this_is_this_test,
+ this_is_this_test.then(|| Arc::clone(has_test_running)),
+ )
+ }
+ CircuitState::Closed { .. } => (false, None),
+ };
+
+ if open {
+ return Err(DynError::new(Error::CircuitBroken {
+ addr: Arc::clone(&self.addr),
+ }));
+ }
+
+ (test_signal, circuit.gen)
+ };
+
+ let fut = self.inner.query(request);
+ let not_cancelled = test_signal.unwrap_or_else(|| Arc::new(AtomicBool::new(true)));
+ let fut = TrackedFuture::new(fut, not_cancelled);
+ let res = fut.await;
+
+ let is_error = if let Err(e) = &res {
+ self.should_break.matches(e)
+ } else {
+ false
+ };
+
+ let mut circuit = self.circuit.lock().expect("not poisoned");
+ let circuit = circuit.deref_mut(); // needed so we can later borrow state and gen seperately
+ if is_error {
+ let maybe_backoff = match &mut circuit.state {
+ CircuitState::Open { .. } => {
+ assert_ne!(
+ start_gen, circuit.gen,
+ "could not have started in an open circuit state"
+ );
+ None
+ }
+ CircuitState::HalfOpen { backoff, .. } => {
+ if circuit.gen == start_gen {
+ Some(backoff.take().expect("not moved"))
+ } else {
+ // this was not the test request but an old one
+ None
+ }
+ }
+ CircuitState::Closed { error_count } => {
+ if circuit.gen == start_gen {
+ *error_count += 1;
+ (*error_count >= self.open_circuit_after_n_errors).then(|| {
+ warn!(
+ addr = self.addr.as_ref(),
+ "Error contacting ingester, circuit opened"
+ );
+
+ Backoff::new_with_rng(
+ &self.backoff_config,
+ self.rng_overwrite
+ .as_ref()
+ .map(|rng| Box::new(rng.clone()) as _),
+ )
+ })
+ } else {
+ None
+ }
+ }
+ };
+
+ if let Some(mut backoff) = maybe_backoff {
+ let until = self.time_provider.now() + backoff.next().expect("never end backoff");
+ self.metrics.set_open();
+ circuit.state = CircuitState::Open {
+ until,
+ backoff: Some(backoff),
+ };
+ circuit.gen += 1;
+ }
+ } else {
+ match &mut circuit.state {
+ CircuitState::Open { .. } => {
+ // We likely started in an "closed" state but this very request here got delayed and in the
+ // meantime there were so many errors that we've opened the circuit. Keep it open.
+ assert_ne!(
+ start_gen, circuit.gen,
+ "cannot have started a request for an open circuit"
+ );
+ }
+ CircuitState::HalfOpen { .. } => {
+ if start_gen == circuit.gen {
+ info!(addr = self.addr.as_ref(), "Circuit closed",);
+
+ self.metrics.set_closed();
+ circuit.state = CircuitState::Closed { error_count: 0 };
+ circuit.gen += 1;
+ }
+ }
+ CircuitState::Closed { error_count, .. } => {
+ if start_gen == circuit.gen {
+ *error_count = 0;
+ }
+ }
+ }
+ }
+
+ res
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::time::Duration;
+
+ use iox_time::MockProvider;
+ use metric::Attributes;
+ use test_helpers::maybe_start_logging;
+ use tokio::{spawn, sync::Barrier};
+
+ use crate::{
+ error::ErrorChainExt,
+ error_classifier::{test_error_classifier, TestError},
+ layers::testing::{TestLayer, TestResponse},
+ };
+
+ use super::*;
+
+ #[tokio::test]
+ async fn test_metric_initially_closed() {
+ maybe_start_logging();
+
+ let TestSetup {
+ metric_registry, ..
+ } = TestSetup::from([]);
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+ }
+
+ #[tokio::test]
+ async fn test_happy_path() {
+ maybe_start_logging();
+
+ let TestSetup {
+ l, metric_registry, ..
+ } = TestSetup::from([TestResponse::ok(())]);
+
+ l.assert_query_ok().await;
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+ }
+
+ #[tokio::test]
+ async fn test_cut_after_n_errors() {
+ maybe_start_logging();
+
+ let TestSetup {
+ l, metric_registry, ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ l.assert_query_err_circuit().await;
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+ }
+
+ #[tokio::test]
+ async fn test_ok_resets_error_counter() {
+ maybe_start_logging();
+
+ let TestSetup { l, .. } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(()),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_ok().await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+ }
+
+ #[tokio::test]
+ async fn test_error_classifier_used() {
+ maybe_start_logging();
+
+ let TestSetup { l, .. } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::NO_RETRY)),
+ TestResponse::ok(()),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::NO_RETRY).await;
+ l.assert_query_ok().await;
+ }
+
+ #[tokio::test]
+ async fn test_recovery() {
+ maybe_start_logging();
+
+ let TestSetup {
+ l,
+ metric_registry,
+ time_provider,
+ ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(()),
+ TestResponse::ok(()),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+ l.assert_query_err_circuit().await;
+
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ time_provider.inc(Duration::from_secs(1));
+
+ l.assert_query_ok().await;
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ l.assert_query_ok().await;
+ }
+
+ #[tokio::test]
+ async fn test_fail_during_recovery() {
+ maybe_start_logging();
+
+ let TestSetup {
+ l,
+ metric_registry,
+ time_provider,
+ ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(()),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ time_provider.inc(Duration::from_secs(1));
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ l.assert_query_err_circuit().await;
+
+ // backoff is longer now (base is 2)
+ time_provider.inc(Duration::from_secs(1));
+ l.assert_query_err_circuit().await;
+ time_provider.inc(Duration::from_secs(1));
+
+ l.assert_query_ok().await;
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+ }
+
+ #[tokio::test]
+ async fn test_only_one_concurrent_request_during_recovery() {
+ maybe_start_logging();
+
+ let barrier_pre = barrier();
+ let barrier_post = barrier();
+ let TestSetup {
+ l,
+ metric_registry,
+ time_provider,
+ ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier_pre))
+ .with_initial_barrier(Arc::clone(&barrier_post)),
+ TestResponse::ok(()),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+ l.assert_query_err_circuit().await;
+
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ time_provider.inc(Duration::from_secs(1));
+
+ let l_captured = Arc::clone(&l);
+ let fut = spawn(async move {
+ l_captured.assert_query_ok().await;
+ });
+ barrier_pre.wait().await;
+
+ l.assert_query_err_circuit().await;
+
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 0,
+ half_open: 1,
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ barrier_post.wait().await;
+ fut.await.unwrap();
+
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0,
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ l.assert_query_ok().await;
+ }
+
+ // this test may seem a bit weird / unnecessary, but I was wondering if this could happen during the implementation
+ #[tokio::test]
+ async fn test_ok_finishes_during_open_state() {
+ maybe_start_logging();
+
+ let barrier_pre = barrier();
+ let barrier_post = barrier();
+ let TestSetup {
+ l, metric_registry, ..
+ } = TestSetup::from([
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier_pre))
+ .with_initial_barrier(Arc::clone(&barrier_post)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ ]);
+
+ let l_captured = Arc::clone(&l);
+ let fut = spawn(async move {
+ l_captured.assert_query_ok().await;
+ });
+ barrier_pre.wait().await;
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+
+ barrier_post.wait().await;
+ fut.await.unwrap();
+
+ // keep it open because while the request was running the circuit opened
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0,
+ },
+ Metrics::from(&metric_registry),
+ );
+ l.assert_query_err_circuit().await;
+ }
+
+ #[tokio::test]
+ async fn test_cancel_recovery() {
+ maybe_start_logging();
+
+ let barrier_pre = barrier();
+ let barrier_post = barrier();
+ let TestSetup {
+ l,
+ metric_registry,
+ time_provider,
+ ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier_pre))
+ .with_initial_barrier(Arc::clone(&barrier_post)),
+ TestResponse::ok(()),
+ ]);
+
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+
+ assert_eq!(
+ Metrics {
+ open: 1,
+ closed: 0,
+ half_open: 0
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ time_provider.inc(Duration::from_secs(1));
+
+ let l_captured = Arc::clone(&l);
+ let fut = spawn(async move {
+ l_captured.assert_query_ok().await;
+ });
+ barrier_pre.wait().await;
+
+ l.assert_query_err_circuit().await;
+
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 0,
+ half_open: 1,
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ fut.abort();
+
+ // wait for tokio to actually cancel the background task
+ tokio::time::timeout(Duration::from_secs(5), async move {
+ loop {
+ if Arc::strong_count(&barrier_post) == 1 {
+ break;
+ }
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ }
+ })
+ .await
+ .unwrap();
+
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 0,
+ half_open: 1,
+ },
+ Metrics::from(&metric_registry),
+ );
+
+ l.assert_query_ok().await;
+
+ assert_eq!(
+ Metrics {
+ open: 0,
+ closed: 1,
+ half_open: 0,
+ },
+ Metrics::from(&metric_registry),
+ );
+ }
+
+ #[tokio::test]
+ async fn test_late_failure_after_recovery() {
+ maybe_start_logging();
+
+ let barrier_pre = barrier();
+ let barrier_post = barrier();
+ let TestSetup {
+ l, time_provider, ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY))
+ .with_initial_barrier(Arc::clone(&barrier_pre))
+ .with_initial_barrier(Arc::clone(&barrier_post)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(()),
+ TestResponse::ok(()),
+ ]);
+
+ // set up request that will fail later
+ let l_captured = Arc::clone(&l);
+ let fut = spawn(async move {
+ l_captured.assert_query_err_upstream(TestError::RETRY).await;
+ });
+ barrier_pre.wait().await;
+
+ // break circuit
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+
+ // recover circuit
+ time_provider.inc(Duration::from_secs(1));
+ l.assert_query_ok().await;
+
+ barrier_post.wait().await;
+ fut.await.unwrap();
+
+ // circuit not broken, because it was too late
+ l.assert_query_ok().await;
+ }
+
+ #[tokio::test]
+ async fn test_late_failure_after_half_open() {
+ maybe_start_logging();
+
+ let barrier1_pre = barrier();
+ let barrier1_post = barrier();
+ let barrier2_pre = barrier();
+ let barrier2_post = barrier();
+ let TestSetup {
+ l, time_provider, ..
+ } = TestSetup::from([
+ TestResponse::err(DynError::new(TestError::RETRY))
+ .with_initial_barrier(Arc::clone(&barrier1_pre))
+ .with_initial_barrier(Arc::clone(&barrier1_post)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier2_pre))
+ .with_initial_barrier(Arc::clone(&barrier2_post)),
+ ]);
+
+ // set up request that will fail later
+ let l_captured = Arc::clone(&l);
+ let fut1 = spawn(async move {
+ l_captured.assert_query_err_upstream(TestError::RETRY).await;
+ });
+ barrier1_pre.wait().await;
+
+ // break circuit
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+
+ // half-open
+ time_provider.inc(Duration::from_secs(1));
+ let l_captured = Arc::clone(&l);
+ let fut2 = spawn(async move {
+ l_captured.assert_query_ok().await;
+ });
+ barrier2_pre.wait().await;
+
+ barrier1_post.wait().await;
+ fut1.await.unwrap();
+
+ barrier2_post.wait().await;
+ fut2.await.unwrap();
+ }
+
+ #[tokio::test]
+ async fn test_late_ok_after_half_open() {
+ maybe_start_logging();
+
+ let barrier1_pre = barrier();
+ let barrier1_post = barrier();
+ let barrier2_pre = barrier();
+ let barrier2_post = barrier();
+ let TestSetup {
+ l, time_provider, ..
+ } = TestSetup::from([
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier1_pre))
+ .with_initial_barrier(Arc::clone(&barrier1_post)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::err(DynError::new(TestError::RETRY)),
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier2_pre))
+ .with_initial_barrier(Arc::clone(&barrier2_post)),
+ ]);
+
+ // set up request that will fail later
+ let l_captured = Arc::clone(&l);
+ let fut1 = spawn(async move {
+ l_captured.assert_query_ok().await;
+ });
+ barrier1_pre.wait().await;
+
+ // break circuit
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_upstream(TestError::RETRY).await;
+ l.assert_query_err_circuit().await;
+
+ // half-open
+ time_provider.inc(Duration::from_secs(1));
+ let l_captured = Arc::clone(&l);
+ let fut2 = spawn(async move {
+ l_captured.assert_query_ok().await;
+ });
+ barrier2_pre.wait().await;
+
+ barrier1_post.wait().await;
+ fut1.await.unwrap();
+
+ barrier2_post.wait().await;
+ fut2.await.unwrap();
+ }
+
+ const TEST_INGESTER: &str = "http://my-ingester";
+
+ fn barrier() -> Arc<Barrier> {
+ Arc::new(Barrier::new(2))
+ }
+
+ struct TestSetup {
+ l: Arc<CircuitBreakerLayer<TestLayer<(), (), ()>>>,
+ time_provider: Arc<MockProvider>,
+ metric_registry: Arc<Registry>,
+ }
+
+ impl<const N: usize> From<[TestResponse<(), ()>; N]> for TestSetup {
+ fn from(responses: [TestResponse<(), ()>; N]) -> Self {
+ let l = TestLayer::<(), (), ()>::default();
+ for resp in responses {
+ l.mock_response(resp);
+ }
+ let time_provider = Arc::new(MockProvider::new(Time::MIN));
+ let metric_registry = Arc::new(Registry::new());
+
+ let mut l = CircuitBreakerLayer::new_with_classifier(
+ l,
+ Arc::from(TEST_INGESTER),
+ Arc::clone(&time_provider) as _,
+ &metric_registry,
+ 2,
+ BackoffConfig::default(),
+ test_error_classifier(),
+ );
+
+ // set up "RNG" that always generates the maximum, so we can test things easier
+ l.rng_overwrite = Some(StepRng::new(u64::MAX, 0));
+ l.backoff_config = BackoffConfig {
+ init_backoff: Duration::from_secs(1),
+ max_backoff: Duration::MAX,
+ base: 2.,
+ deadline: None,
+ };
+
+ Self {
+ l: Arc::new(l),
+ time_provider,
+ metric_registry,
+ }
+ }
+ }
+
+ #[async_trait]
+ trait TestLayerExt {
+ async fn assert_query_ok(&self);
+ async fn assert_query_err_upstream(&self, e: TestError);
+ async fn assert_query_err_circuit(&self);
+ }
+
+ #[async_trait]
+ impl<L> TestLayerExt for L
+ where
+ L: Layer<Request = (), ResponseMetadata = (), ResponsePayload = ()>,
+ {
+ async fn assert_query_ok(&self) {
+ self.query(()).await.unwrap();
+ }
+
+ async fn assert_query_err_upstream(&self, e: TestError) {
+ let e_actual = self.query(()).await.unwrap_err();
+ assert!(
+ e_actual.error_chain().any(|e_actual| e_actual
+ .downcast_ref::<TestError>()
+ .map(|e_actual| e_actual == &e)
+ .unwrap_or_default()),
+ "Error does not match.\n\nActual:\n{e_actual}\n\nExpected:\n{e}",
+ );
+ }
+
+ async fn assert_query_err_circuit(&self) {
+ let e = self.query(()).await.unwrap_err();
+ assert!(
+ e.error_chain().any(|e| e
+ .downcast_ref::<Error>()
+ .map(|e| matches!(e, Error::CircuitBroken { .. }))
+ .unwrap_or_default()),
+ "Error is NOT a circuit breaker:\n{e}",
+ );
+ }
+ }
+
+ #[derive(Debug, PartialEq, Eq)]
+ struct Metrics {
+ open: u64,
+ closed: u64,
+ half_open: u64,
+ }
+
+ impl From<&Arc<Registry>> for Metrics {
+ fn from(registry: &Arc<Registry>) -> Self {
+ let instrument = registry
+ .get_instrument::<Metric<U64Gauge>>("ingester_circuit_state")
+ .expect("failed to read metric");
+
+ let open = instrument
+ .get_observer(&Attributes::from(&[
+ ("state", "open"),
+ ("ingester", TEST_INGESTER),
+ ]))
+ .expect("failed to get observer")
+ .fetch();
+
+ let closed = instrument
+ .get_observer(&Attributes::from(&[
+ ("state", "closed"),
+ ("ingester", TEST_INGESTER),
+ ]))
+ .expect("failed to get observer")
+ .fetch();
+
+ let half_open = instrument
+ .get_observer(&Attributes::from(&[
+ ("state", "half_open"),
+ ("ingester", TEST_INGESTER),
+ ]))
+ .expect("failed to get observer")
+ .fetch();
+
+ Self {
+ open,
+ closed,
+ half_open,
+ }
+ }
+ }
+}
diff --git a/ingester_query_client/src/layers/mod.rs b/ingester_query_client/src/layers/mod.rs
index 0f0506a531..b7ef7fc0e3 100644
--- a/ingester_query_client/src/layers/mod.rs
+++ b/ingester_query_client/src/layers/mod.rs
@@ -1,6 +1,7 @@
//! Layers.
pub mod backoff;
+pub mod circuit_breaker;
pub mod deserialize;
pub mod logging;
pub mod metrics;
|
14007808bdab41990f3a1f9047ba52b0a7b1391d
|
Carol (Nichols || Goulding)
|
2023-05-12 13:25:49
|
Move remaining conversions between data types and proto into data_types
|
And have data_types depend on generated_types rather than vice versa.
| null |
fix: Move remaining conversions between data types and proto into data_types
And have data_types depend on generated_types rather than vice versa.
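A minimal sketch of the dependency direction this enables (type and variant names trimmed down from the diff below; the real enums live in `generated_types` and `data_types`): the `TryFrom` conversion sits next to the domain type and imports the generated proto type, not the other way around.

```rust
// Hypothetical stand-ins for the generated proto enum and the domain type;
// in the real crates these come from `generated_types` and `data_types`.
mod proto {
    #[derive(Debug, Clone, Copy)]
    pub enum ColumnType {
        Unspecified,
        Tag,
        Time,
    }
}

#[derive(Debug, PartialEq, Eq)]
enum ColumnType {
    Tag,
    Time,
}

// With data_types depending on generated_types, the conversion can be
// implemented alongside the domain type itself.
impl TryFrom<proto::ColumnType> for ColumnType {
    type Error = Box<dyn std::error::Error>;

    fn try_from(value: proto::ColumnType) -> Result<Self, Self::Error> {
        Ok(match value {
            proto::ColumnType::Tag => ColumnType::Tag,
            proto::ColumnType::Time => ColumnType::Time,
            proto::ColumnType::Unspecified => return Err("unknown column type".into()),
        })
    }
}

fn main() {
    assert_eq!(
        ColumnType::try_from(proto::ColumnType::Tag).unwrap(),
        ColumnType::Tag
    );
    assert!(ColumnType::try_from(proto::ColumnType::Unspecified).is_err());
}
```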
|
diff --git a/Cargo.lock b/Cargo.lock
index e6a944b68f..7cc75a95e9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1414,6 +1414,7 @@ name = "data_types"
version = "0.1.0"
dependencies = [
"croaring",
+ "generated_types",
"influxdb-line-protocol",
"iox_time",
"observability_deps",
@@ -2036,7 +2037,6 @@ version = "0.1.0"
dependencies = [
"base64 0.21.0",
"bytes",
- "data_types",
"observability_deps",
"pbjson",
"pbjson-build",
@@ -6735,7 +6735,6 @@ dependencies = [
"hashbrown 0.12.3",
"hashbrown 0.13.2",
"heck",
- "hyper",
"indexmap",
"io-lifetimes",
"itertools",
diff --git a/data_types/Cargo.toml b/data_types/Cargo.toml
index 7de4c08e1d..f3c3d4db1a 100644
--- a/data_types/Cargo.toml
+++ b/data_types/Cargo.toml
@@ -10,6 +10,7 @@ license.workspace = true
croaring = "0.8.1"
influxdb-line-protocol = { path = "../influxdb_line_protocol" }
iox_time = { path = "../iox_time" }
+generated_types = { path = "../generated_types" }
observability_deps = { path = "../observability_deps" }
once_cell = "1"
ordered-float = "3"
diff --git a/data_types/src/columns.rs b/data_types/src/columns.rs
index a4d6bd8fd9..58c2db0913 100644
--- a/data_types/src/columns.rs
+++ b/data_types/src/columns.rs
@@ -1,6 +1,7 @@
//! Types having to do with columns.
use super::TableId;
+use generated_types::influxdata::iox::schema::v1 as proto;
use influxdb_line_protocol::FieldValue;
use schema::{builder::SchemaBuilder, InfluxColumnType, InfluxFieldType, Schema};
use sqlx::postgres::PgHasArrayType;
@@ -305,6 +306,25 @@ pub fn column_type_from_field(field_value: &FieldValue) -> ColumnType {
}
}
+impl TryFrom<proto::column_schema::ColumnType> for ColumnType {
+ type Error = Box<dyn std::error::Error>;
+
+ fn try_from(value: proto::column_schema::ColumnType) -> Result<Self, Self::Error> {
+ Ok(match value {
+ proto::column_schema::ColumnType::I64 => ColumnType::I64,
+ proto::column_schema::ColumnType::U64 => ColumnType::U64,
+ proto::column_schema::ColumnType::F64 => ColumnType::F64,
+ proto::column_schema::ColumnType::Bool => ColumnType::Bool,
+ proto::column_schema::ColumnType::String => ColumnType::String,
+ proto::column_schema::ColumnType::Time => ColumnType::Time,
+ proto::column_schema::ColumnType::Tag => ColumnType::Tag,
+ proto::column_schema::ColumnType::Unspecified => {
+ return Err("unknown column type".into())
+ }
+ })
+ }
+}
+
/// Set of columns.
#[derive(Debug, Clone, PartialEq, Eq, sqlx::Type)]
#[sqlx(transparent)]
@@ -363,4 +383,38 @@ mod tests {
fn test_column_set_duplicates() {
ColumnSet::new([ColumnId::new(1), ColumnId::new(2), ColumnId::new(1)]);
}
+
+ #[test]
+ fn test_column_schema() {
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::I64).unwrap(),
+ ColumnType::I64,
+ );
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::U64).unwrap(),
+ ColumnType::U64,
+ );
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::F64).unwrap(),
+ ColumnType::F64,
+ );
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::Bool).unwrap(),
+ ColumnType::Bool,
+ );
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::String).unwrap(),
+ ColumnType::String,
+ );
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::Time).unwrap(),
+ ColumnType::Time,
+ );
+ assert_eq!(
+ ColumnType::try_from(proto::column_schema::ColumnType::Tag).unwrap(),
+ ColumnType::Tag,
+ );
+
+ assert!(ColumnType::try_from(proto::column_schema::ColumnType::Unspecified).is_err());
+ }
}
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index 84a415da8d..73e7bd8847 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -623,6 +623,33 @@ pub struct SkippedCompaction {
pub limit_num_files_first_in_partition: i64,
}
+use generated_types::influxdata::iox::compactor::v1 as compactor_proto;
+impl From<SkippedCompaction> for compactor_proto::SkippedCompaction {
+ fn from(skipped_compaction: SkippedCompaction) -> Self {
+ let SkippedCompaction {
+ partition_id,
+ reason,
+ skipped_at,
+ estimated_bytes,
+ limit_bytes,
+ num_files,
+ limit_num_files,
+ limit_num_files_first_in_partition,
+ } = skipped_compaction;
+
+ Self {
+ partition_id: partition_id.get(),
+ reason,
+ skipped_at: skipped_at.get(),
+ estimated_bytes,
+ limit_bytes,
+ num_files,
+ limit_num_files,
+ limit_num_files_first_in_partition: Some(limit_num_files_first_in_partition),
+ }
+ }
+}
+
/// Data for a parquet file reference that has been inserted in the catalog.
#[derive(Debug, Clone, PartialEq, Eq, sqlx::FromRow)]
pub struct ParquetFile {
diff --git a/generated_types/Cargo.toml b/generated_types/Cargo.toml
index 21d67c1924..26c8729261 100644
--- a/generated_types/Cargo.toml
+++ b/generated_types/Cargo.toml
@@ -8,7 +8,6 @@ license.workspace = true
[dependencies] # In alphabetical order
base64 = "0.21"
bytes = "1.4"
-data_types = { path = "../data_types", optional = true }
observability_deps = { path = "../observability_deps" }
pbjson = "0.5"
pbjson-types = "0.5"
@@ -22,10 +21,3 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" }
tonic-build = { workspace = true }
prost-build = "0.11"
pbjson-build = "0.5"
-
-[dev-dependencies]
-data_types = { path = "../data_types" }
-
-[features]
-default = ["data_types_conversions"]
-data_types_conversions = ["data_types"]
diff --git a/generated_types/src/compactor.rs b/generated_types/src/compactor.rs
deleted file mode 100644
index 364647fd43..0000000000
--- a/generated_types/src/compactor.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use crate::influxdata::iox::compactor::v1 as proto;
-use data_types::SkippedCompaction;
-
-impl From<SkippedCompaction> for proto::SkippedCompaction {
- fn from(skipped_compaction: SkippedCompaction) -> Self {
- let SkippedCompaction {
- partition_id,
- reason,
- skipped_at,
- estimated_bytes,
- limit_bytes,
- num_files,
- limit_num_files,
- limit_num_files_first_in_partition,
- } = skipped_compaction;
-
- Self {
- partition_id: partition_id.get(),
- reason,
- skipped_at: skipped_at.get(),
- estimated_bytes,
- limit_bytes,
- num_files,
- limit_num_files,
- limit_num_files_first_in_partition: Some(limit_num_files_first_in_partition),
- }
- }
-}
diff --git a/generated_types/src/lib.rs b/generated_types/src/lib.rs
index e981c40590..db013bad74 100644
--- a/generated_types/src/lib.rs
+++ b/generated_types/src/lib.rs
@@ -145,25 +145,6 @@ pub mod influxdata {
env!("OUT_DIR"),
"/influxdata.iox.schema.v1.serde.rs"
));
-
- impl TryFrom<column_schema::ColumnType> for data_types::ColumnType {
- type Error = Box<dyn std::error::Error>;
-
- fn try_from(value: column_schema::ColumnType) -> Result<Self, Self::Error> {
- Ok(match value {
- column_schema::ColumnType::I64 => data_types::ColumnType::I64,
- column_schema::ColumnType::U64 => data_types::ColumnType::U64,
- column_schema::ColumnType::F64 => data_types::ColumnType::F64,
- column_schema::ColumnType::Bool => data_types::ColumnType::Bool,
- column_schema::ColumnType::String => data_types::ColumnType::String,
- column_schema::ColumnType::Time => data_types::ColumnType::Time,
- column_schema::ColumnType::Tag => data_types::ColumnType::Tag,
- column_schema::ColumnType::Unspecified => {
- return Err("unknown column type".into())
- }
- })
- }
- }
}
}
@@ -239,9 +220,6 @@ pub use influxdata::platform::storage::*;
pub mod google;
-#[cfg(any(feature = "data_types_conversions", test))]
-pub mod compactor;
-
pub use prost::{DecodeError, EncodeError};
#[cfg(test)]
@@ -263,40 +241,4 @@ mod tests {
// The URL must start with the type.googleapis.com prefix
assert!(!protobuf_type_url_eq(STORAGE_SERVICE, STORAGE_SERVICE,));
}
-
- #[test]
- fn test_column_schema() {
- use influxdata::iox::schema::v1::*;
-
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::I64).unwrap(),
- data_types::ColumnType::I64,
- );
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::U64).unwrap(),
- data_types::ColumnType::U64,
- );
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::F64).unwrap(),
- data_types::ColumnType::F64,
- );
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::Bool).unwrap(),
- data_types::ColumnType::Bool,
- );
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::String).unwrap(),
- data_types::ColumnType::String,
- );
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::Time).unwrap(),
- data_types::ColumnType::Time,
- );
- assert_eq!(
- data_types::ColumnType::try_from(column_schema::ColumnType::Tag).unwrap(),
- data_types::ColumnType::Tag,
- );
-
- assert!(data_types::ColumnType::try_from(column_schema::ColumnType::Unspecified).is_err());
- }
}
diff --git a/influxdb_iox_client/Cargo.toml b/influxdb_iox_client/Cargo.toml
index 85425cbceb..63e8cc00e8 100644
--- a/influxdb_iox_client/Cargo.toml
+++ b/influxdb_iox_client/Cargo.toml
@@ -19,7 +19,7 @@ client_util = { path = "../client_util" }
comfy-table = { version = "6.1", default-features = false}
futures-util = { version = "0.3" }
influxdb-line-protocol = { path = "../influxdb_line_protocol"}
-generated_types = { path = "../generated_types", default-features = false, features = ["data_types_conversions"] }
+generated_types = { path = "../generated_types" }
prost = "0.11"
rand = "0.8.3"
reqwest = { version = "0.11", default-features = false, features = ["stream", "rustls-tls"] }
diff --git a/influxdb_storage_client/Cargo.toml b/influxdb_storage_client/Cargo.toml
index 28a6c958b2..fdd3a8c9e7 100644
--- a/influxdb_storage_client/Cargo.toml
+++ b/influxdb_storage_client/Cargo.toml
@@ -7,7 +7,7 @@ license.workspace = true
[dependencies]
client_util = { path = "../client_util" }
-generated_types = { path = "../generated_types", default-features=false, features=["data_types"] }
+generated_types = { path = "../generated_types" }
prost = "0.11"
tonic = { workspace = true }
futures-util = { version = "0.3" }
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index c4d6e7809a..6d1980e007 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -155,7 +155,6 @@ url = { version = "2" }
uuid = { version = "1", features = ["v4"] }
[target.x86_64-unknown-linux-gnu.dependencies]
-hyper = { version = "0.14", features = ["full"] }
io-lifetimes = { version = "1" }
nix = { version = "0.26" }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
@@ -167,7 +166,6 @@ once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.37", features = ["fs", "termios"] }
[target.x86_64-apple-darwin.dependencies]
-hyper = { version = "0.14", features = ["full"] }
io-lifetimes = { version = "1" }
nix = { version = "0.26" }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
@@ -179,7 +177,6 @@ once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.37", features = ["fs", "termios"] }
[target.aarch64-apple-darwin.dependencies]
-hyper = { version = "0.14", features = ["full"] }
io-lifetimes = { version = "1" }
nix = { version = "0.26" }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
@@ -191,7 +188,6 @@ once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.37", features = ["fs", "termios"] }
[target.x86_64-pc-windows-msvc.dependencies]
-hyper = { version = "0.14", features = ["full"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
scopeguard = { version = "1" }
winapi = { version = "0.3", default-features = false, features = ["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "ntsecapi", "ntstatus", "objbase", "processenv", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
|
9eac89ebfbb1cc478e788bfccd2edb13ba8bd229
|
Marco Neumann
|
2023-04-20 10:12:07
|
also include metadata in empty InfluxQL select results (#7604)
|
At least the schema metadata and the measurements column should be there.
| null |
fix: also include metadata in empty InfluxQL select results (#7604)
At least the schema metadata and the measurements column should be there.
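A minimal sketch of the idea, assuming the `arrow` crate and simplifying the measurement column to plain `Utf8` (the real plan uses a dictionary type): an empty `RecordBatch` built from an explicit schema still carries the column, so even a zero-row InfluxQL result is not schema-less.

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;

fn main() {
    // Even with no rows, an empty batch still carries the schema, so a
    // consumer can see the measurement column (and any schema metadata).
    let schema = Arc::new(Schema::new(vec![Field::new(
        "iox::measurement",
        DataType::Utf8,
        false,
    )]));
    let batch = RecordBatch::new_empty(Arc::clone(&schema));

    assert_eq!(batch.num_rows(), 0);
    assert_eq!(batch.schema().fields().len(), 1);
}
```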
|
diff --git a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
index be0ec6f8b4..e3b739ab6c 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
+++ b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
@@ -70,6 +70,10 @@ name: measurements
| disk |
+------+
-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = does_not_exist;
++------+
+| name |
++------+
++------+
-- InfluxQL: SHOW MEASUREMENTS WHERE time >= '2022-10-31T02:00:30Z';
-- Results After Sorting
name: measurements
@@ -97,6 +101,10 @@ name: measurements
+-------------+
-- InfluxQL: SHOW MEASUREMENTS WHERE tag0 = "a";
-- Results After Sorting
++------+
+| name |
++------+
++------+
-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = /my_db/;
Error while planning query: Error during planning: expected string but got regex
-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT =~ my_db;
@@ -400,6 +408,10 @@ name: disk
| bytes_used | integer |
+------------+-----------+
-- InfluxQL: SHOW FIELD KEYS FROM does_not_exist;
++----------+-----------+
+| fieldKey | fieldType |
++----------+-----------+
++----------+-----------+
-- InfluxQL: SHOW FIELD KEYS ON my_db;
Error while planning query: This feature is not implemented: SHOW FIELD KEYS ON <database>
-- InfluxQL: SHOW FIELD KEYS FROM x.my_db;
@@ -453,6 +465,10 @@ name: select_test
+------+-------+
-- InfluxQL: SHOW TAG VALUES WITH KEY = "does_not_exist";
-- Results After Sorting
++-----+-------+
+| key | value |
++-----+-------+
++-----+-------+
-- InfluxQL: SHOW TAG VALUES WITH KEY != "tag0";
-- Results After Sorting
name: cpu
@@ -923,6 +939,10 @@ name: disk
+--------+---------+
-- InfluxQL: SHOW TAG VALUES FROM does_not_exist WITH KEY = "tag0";
-- Results After Sorting
++-----+-------+
+| key | value |
++-----+-------+
++-----+-------+
-- InfluxQL: SHOW TAG VALUES WITH KEY = "tt_tag";
-- Results After Sorting
name: time_test
@@ -978,6 +998,10 @@ name: select_test
+--------+-------+
-- InfluxQL: SHOW TAG VALUES WITH KEY = "st_tag" WHERE tag0 = "a";
-- Results After Sorting
++-----+-------+
+| key | value |
++-----+-------+
++-----+-------+
-- InfluxQL: SHOW TAG VALUES ON my_db WITH KEY = "tag0";
Error while planning query: This feature is not implemented: SHOW TAG VALUES ON <database>
-- InfluxQL: SHOW TAG VALUES FROM x.my_db WITH KEY = "tag0";
@@ -1283,6 +1307,10 @@ name: disk
| host |
+--------+
-- InfluxQL: SHOW TAG KEYS FROM does_not_exist;
++--------+
+| tagKey |
++--------+
++--------+
-- InfluxQL: SHOW TAG KEYS ON my_db;
Error while planning query: This feature is not implemented: SHOW TAG KEYS ON <database>
-- InfluxQL: SHOW TAG KEYS FROM x.my_db;
diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql
index 87bf1e2334..d696c41fb1 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql
+++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql
@@ -491,3 +491,6 @@ SELECT COUNT(DISTINCT str), COUNT(DISTINCT i64) FROM m0;
-- fallible cases
SELECT DISTINCT(str), DISTINCT(i64) FROM m0;
+
+-- non-existing table
+SELECT * FROM does_not_exist;
diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected
index 07a5834f34..546de7c885 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected
+++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected
@@ -13,6 +13,9 @@ name: m0
| 2022-10-31T02:00:30 | 19.2 | 392 | lo | val00 | |
+---------------------+------+-----+-----+-------+-------+
-- InfluxQL: SELECT * FROM non_existent;
+++
+++
+++
-- InfluxQL: SELECT *::tag, f64 FROM m0;
name: m0
+---------------------+-------+-------+------+
@@ -375,6 +378,10 @@ name: m0
| 2022-10-31T02:00:10 | 21.2 |
+---------------------+------+
-- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND str = 1;
++------+-----+
+| time | f64 |
++------+-----+
++------+-----+
-- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19 + 0.5 OR non_existent = 1;
name: m0
+---------------------+------+
@@ -383,7 +390,15 @@ name: m0
| 2022-10-31T02:00:10 | 21.2 |
+---------------------+------+
-- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND non_existent = 1;
++------+-----+
+| time | f64 |
++------+-----+
++------+-----+
-- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND f64 =~ /foo/;
++------+-----+
+| time | f64 |
++------+-----+
++------+-----+
-- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 OR f64 =~ /foo/;
name: m0
+---------------------+------+
@@ -998,6 +1013,9 @@ name: cpu
| 1970-01-01T00:00:00 | 1.9850000000000003 | |
+---------------------+--------------------+--------------------------+
-- InfluxQL: SELECT MEAN(foo) FROM cpu;
+++
+++
+++
-- InfluxQL: SELECT MEAN(usage_idle) + MEAN(foo) FROM cpu GROUP BY cpu;
name: cpu
tags: cpu=cpu-total
@@ -1043,6 +1061,9 @@ tags: cpu=cpu1
| 1970-01-01T00:00:00 | 1.9849999999999999 | |
+---------------------+--------------------+--------------------------+
-- InfluxQL: SELECT MEAN(foo) FROM cpu GROUP BY cpu;
+++
+++
+++
-- InfluxQL: SELECT COUNT(f64), SUM(f64) FROM m0 GROUP BY TIME(30s) FILL(none);
name: m0
+---------------------+-------+------+
@@ -1692,6 +1713,10 @@ tags: cpu=, device=disk1s5
| 2022-10-31T02:01:30 | | 3 |
+---------------------+-------+---------+
-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu WHERE time >= now() - 2m GROUP BY TIME(30s) FILL(null);
++------+-------+
+| time | count |
++------+-------+
++------+-------+
-- InfluxQL: SELECT f64 FROM m0 WHERE tag0 = 'val00' LIMIT 3;
name: m0
+---------------------+------+
@@ -2045,6 +2070,10 @@ tags: cpu=cpu1
| 1970-01-01T00:00:00 | 2 |
+---------------------+-------+
-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu OFFSET 1;
++------+-------+
+| time | count |
++------+-------+
++------+-------+
-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu LIMIT 1;
name: cpu
tags: cpu=cpu-total
@@ -2068,6 +2097,10 @@ tags: cpu=cpu1
| 1970-01-01T00:00:00 | 2 |
+---------------------+-------+
-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu OFFSET 1;
++------+-------+
+| time | count |
++------+-------+
++------+-------+
-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time < '2022-10-31T02:05:00Z' GROUP BY TIME(30s) LIMIT 2;
name: cpu
+---------------------+-------+
@@ -2501,4 +2534,8 @@ name: m0
| 1970-01-01T00:00:00 | 2 | 4 |
+---------------------+-------+---------+
-- InfluxQL: SELECT DISTINCT(str), DISTINCT(i64) FROM m0;
-Error while planning query: Error during planning: aggregate function distinct() cannot be combined with other functions or fields
\ No newline at end of file
+Error while planning query: Error during planning: aggregate function distinct() cannot be combined with other functions or fields
+-- InfluxQL: SELECT * FROM does_not_exist;
+++
+++
+++
\ No newline at end of file
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index 80a675a80a..7eff3b4467 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -375,7 +375,26 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
}) => continue,
plan => break plan,
},
- None => return LogicalPlanBuilder::empty(false).build(),
+ None => {
+ // empty result, but let's at least have all the strictly necessary metadata
+ let schema = Arc::new(ArrowSchema::new(vec![ArrowField::new(
+ INFLUXQL_MEASUREMENT_COLUMN_NAME,
+ (&InfluxColumnType::Tag).into(),
+ false,
+ )]));
+ let plan = LogicalPlan::EmptyRelation(EmptyRelation {
+ produce_one_row: false,
+ schema: schema.to_dfschema_ref()?,
+ });
+ let plan = plan_with_metadata(
+ plan,
+ &InfluxQlMetadata {
+ measurement_column_index: MEASUREMENT_COLUMN_INDEX,
+ tag_key_columns: vec![],
+ },
+ )?;
+ return Ok(plan);
+ }
}
}
};
@@ -2746,7 +2765,7 @@ mod test {
"###);
// nonexistent
- assert_snapshot!(plan("SELECT host, usage_idle FROM non_existent"), @"EmptyRelation []");
+ assert_snapshot!(plan("SELECT host, usage_idle FROM non_existent"), @"EmptyRelation [iox::measurement:Dictionary(Int32, Utf8)]");
assert_snapshot!(plan("SELECT host, usage_idle FROM cpu, non_existent"), @r###"
Sort: time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N]
Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N]
@@ -2873,8 +2892,8 @@ mod test {
"###);
// invalid column reference
- assert_snapshot!(plan("SELECT not_exists::tag FROM data"), @"EmptyRelation []");
- assert_snapshot!(plan("SELECT not_exists::field FROM data"), @"EmptyRelation []");
+ assert_snapshot!(plan("SELECT not_exists::tag FROM data"), @"EmptyRelation [iox::measurement:Dictionary(Int32, Utf8)]");
+ assert_snapshot!(plan("SELECT not_exists::field FROM data"), @"EmptyRelation [iox::measurement:Dictionary(Int32, Utf8)]");
// Returns NULL for invalid casts
assert_snapshot!(plan("SELECT f64_field::string FROM data"), @r###"
diff --git a/test_helpers_end_to_end/src/snapshot_comparison.rs b/test_helpers_end_to_end/src/snapshot_comparison.rs
index c8a68f56fc..c93f5f1b82 100644
--- a/test_helpers_end_to_end/src/snapshot_comparison.rs
+++ b/test_helpers_end_to_end/src/snapshot_comparison.rs
@@ -321,7 +321,11 @@ async fn run_query(cluster: &MiniCluster, query: &Query) -> Result<Vec<String>>
)
.await
{
- Ok((batches, _)) => batches,
+ Ok((mut batches, schema)) => {
+ batches.push(RecordBatch::new_empty(schema));
+
+ batches
+ }
Err(influxdb_iox_client::flight::Error::ArrowFlightError(FlightError::Tonic(
status,
))) if status.code() == Code::InvalidArgument => {
|
df87ca3f17890275379bc14acf65ac9f3890212d
|
Dom Dwyer
|
2023-01-25 15:17:57
|
appropriate queue wait histogram buckets
|
Changes the bucket values for the queue wait duration metric to be more
appropriately scaled.
| null |
refactor: appropriate queue wait histogram buckets
Changes the bucket values for the queue wait duration metric to be more
appropriately scaled.
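A minimal sketch (hypothetical helper, not the actual ingester code) of the bucket shape used in the diff below: a 500ms floor followed by doubling boundaries up to 256s, with the real histogram adding `DURATION_MAX` as a final catch-all.

```rust
use std::time::Duration;

/// Build doubling bucket boundaries starting at 500ms, mirroring the
/// shape of the queue-wait histogram buckets below.
fn queue_wait_buckets() -> Vec<Duration> {
    let mut buckets = vec![Duration::from_millis(500)];
    let mut next = Duration::from_secs(1);
    while next <= Duration::from_secs(256) {
        buckets.push(next);
        next *= 2;
    }
    buckets
}

fn main() {
    let buckets = queue_wait_buckets();
    assert_eq!(buckets.first(), Some(&Duration::from_millis(500)));
    assert_eq!(buckets.last(), Some(&Duration::from_secs(256)));
    assert_eq!(buckets.len(), 10);
}
```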
|
diff --git a/ingester2/src/persist/handle.rs b/ingester2/src/persist/handle.rs
index 52a151da4c..cc9f825210 100644
--- a/ingester2/src/persist/handle.rs
+++ b/ingester2/src/persist/handle.rs
@@ -1,9 +1,9 @@
-use std::sync::Arc;
+use std::{sync::Arc, time::Duration};
use async_trait::async_trait;
use iox_catalog::interface::Catalog;
use iox_query::{exec::Executor, QueryChunkMeta};
-use metric::{DurationHistogram, U64Counter, U64Gauge};
+use metric::{DurationHistogram, DurationHistogramOptions, U64Counter, U64Gauge, DURATION_MAX};
use observability_deps::tracing::*;
use parking_lot::Mutex;
use parquet_file::storage::ParquetStorage;
@@ -201,9 +201,22 @@ impl PersistHandle {
)
.recorder(&[]);
let queue_duration = metrics
- .register_metric::<DurationHistogram>(
+ .register_metric_with_options::<DurationHistogram, _>(
"ingester_persist_enqueue_duration",
"the distribution of duration a persist job spent enqueued, waiting to be processed in seconds",
+ || DurationHistogramOptions::new([
+ Duration::from_millis(500),
+ Duration::from_secs(1),
+ Duration::from_secs(2),
+ Duration::from_secs(4),
+ Duration::from_secs(8),
+ Duration::from_secs(16),
+ Duration::from_secs(32),
+ Duration::from_secs(64),
+ Duration::from_secs(128),
+ Duration::from_secs(256),
+ DURATION_MAX,
+ ])
)
.recorder(&[]);
|
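The commit above replaces the histogram's default buckets with an explicit series that doubles from 500 ms up to 256 s, capped by `DURATION_MAX`. Below is a minimal sketch of registering and feeding such a recorder, assuming only the `metric` crate calls visible in the diff (`register_metric_with_options`, `DurationHistogramOptions::new`, `recorder`, `record`); the `main` wrapper and helper name are illustrative, not part of the commit.

```rust
use std::time::{Duration, Instant};

use metric::{DurationHistogram, DurationHistogramOptions, Registry, DURATION_MAX};

/// Register a queue-wait histogram with explicitly scaled buckets
/// (doubling from 500 ms to 256 s, plus a catch-all upper bound).
fn queue_wait_recorder(metrics: &Registry) -> DurationHistogram {
    metrics
        .register_metric_with_options::<DurationHistogram, _>(
            "ingester_persist_enqueue_duration",
            "distribution of time a persist job spent enqueued, in seconds",
            || {
                DurationHistogramOptions::new([
                    Duration::from_millis(500),
                    Duration::from_secs(1),
                    Duration::from_secs(2),
                    Duration::from_secs(4),
                    Duration::from_secs(8),
                    Duration::from_secs(16),
                    Duration::from_secs(32),
                    Duration::from_secs(64),
                    Duration::from_secs(128),
                    Duration::from_secs(256),
                    DURATION_MAX,
                ])
            },
        )
        .recorder(&[])
}

fn main() {
    let metrics = Registry::default();
    let queue_duration = queue_wait_recorder(&metrics);

    // Measure how long a job waits in the queue, then record the observation.
    let enqueued_at = Instant::now();
    // ... job sits in the queue until it is picked up ...
    queue_duration.record(enqueued_at.elapsed());
}
```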
93e11d4c9172fb3ae87d66d9f8a84f2a496df587
|
Nga Tran
|
2022-11-10 12:01:39
|
Revert "feat: flag partitions for delete (#6075)" (#6111)
|
This reverts commit 77a2541172297dc375b097036ca092ba84e8311d.
| null |
chore: Revert "feat: flag partitions for delete (#6075)" (#6111)
This reverts commit 77a2541172297dc375b097036ca092ba84e8311d.
|
diff --git a/Cargo.lock b/Cargo.lock
index 38b2dae001..6012035630 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2400,7 +2400,6 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"async-trait",
- "chrono",
"data_types",
"dotenvy",
"futures",
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index fdc7c5548c..c4d92c92be 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -902,9 +902,6 @@ pub struct Partition {
///
/// If [`None`] no data has been persisted for this partition.
pub persisted_sequence_number: Option<SequenceNumber>,
-
- /// When this file was marked for deletion
- pub to_delete: Option<Timestamp>,
}
impl Partition {
diff --git a/import/src/aggregate_tsm_schema/update_catalog.rs b/import/src/aggregate_tsm_schema/update_catalog.rs
index 4ce456ef0d..a9d681fda3 100644
--- a/import/src/aggregate_tsm_schema/update_catalog.rs
+++ b/import/src/aggregate_tsm_schema/update_catalog.rs
@@ -1322,7 +1322,6 @@ mod tests {
persisted_sequence_number: None,
partition_key: PartitionKey::from("2022-06-21"),
sort_key: Vec::new(),
- to_delete: None,
};
let sort_key = get_sort_key(&partition, &m).1.unwrap();
let sort_key = sort_key.to_columns().collect::<Vec<_>>();
@@ -1371,7 +1370,6 @@ mod tests {
partition_key: PartitionKey::from("2022-06-21"),
// N.B. sort key is already what it will computed to; here we're testing the `adjust_sort_key_columns` code path
sort_key: vec!["host".to_string(), "arch".to_string(), "time".to_string()],
- to_delete: None,
};
// ensure sort key is unchanged
let _maybe_updated_sk = get_sort_key(&partition, &m).1;
@@ -1419,7 +1417,6 @@ mod tests {
partition_key: PartitionKey::from("2022-06-21"),
// N.B. is missing host so will need updating
sort_key: vec!["arch".to_string(), "time".to_string()],
- to_delete: None,
};
let sort_key = get_sort_key(&partition, &m).1.unwrap();
let sort_key = sort_key.to_columns().collect::<Vec<_>>();
@@ -1469,7 +1466,6 @@ mod tests {
partition_key: PartitionKey::from("2022-06-21"),
// N.B. is missing arch so will need updating
sort_key: vec!["host".to_string(), "time".to_string()],
- to_delete: None,
};
let sort_key = get_sort_key(&partition, &m).1.unwrap();
let sort_key = sort_key.to_columns().collect::<Vec<_>>();
diff --git a/ingester/src/data/partition/resolver/cache.rs b/ingester/src/data/partition/resolver/cache.rs
index 70cc0d1b7b..5f0daabc09 100644
--- a/ingester/src/data/partition/resolver/cache.rs
+++ b/ingester/src/data/partition/resolver/cache.rs
@@ -303,7 +303,6 @@ mod tests {
partition_key: stored_partition_key.clone(),
sort_key: vec!["dos".to_string(), "bananas".to_string()],
persisted_sequence_number: Default::default(),
- to_delete: None,
};
let cache = new_cache(inner, [partition]);
@@ -359,7 +358,6 @@ mod tests {
partition_key: PARTITION_KEY.into(),
sort_key: Default::default(),
persisted_sequence_number: Default::default(),
- to_delete: None,
};
let cache = new_cache(inner, [partition]);
@@ -400,7 +398,6 @@ mod tests {
partition_key: PARTITION_KEY.into(),
sort_key: Default::default(),
persisted_sequence_number: Default::default(),
- to_delete: None,
};
let cache = new_cache(inner, [partition]);
@@ -441,7 +438,6 @@ mod tests {
partition_key: PARTITION_KEY.into(),
sort_key: Default::default(),
persisted_sequence_number: Default::default(),
- to_delete: None,
};
let cache = new_cache(inner, [partition]);
diff --git a/iox_catalog/Cargo.toml b/iox_catalog/Cargo.toml
index 563132308c..f76cc3ca7e 100644
--- a/iox_catalog/Cargo.toml
+++ b/iox_catalog/Cargo.toml
@@ -7,7 +7,6 @@ license.workspace = true
[dependencies] # In alphabetical order
async-trait = "0.1.58"
-chrono = "0.4.19"
data_types = { path = "../data_types" }
futures = "0.3"
iox_time = { version = "0.1.0", path = "../iox_time" }
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index 9d561426f5..1ec546a321 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -483,9 +483,6 @@ pub trait PartitionRepo: Send + Sync {
/// Return the N most recently created partitions for the specified shards.
async fn most_recent_n(&mut self, n: usize, shards: &[ShardId]) -> Result<Vec<Partition>>;
-
- /// Flag all partition for deletion that are older than their namespace's retention period.
- async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<PartitionId>>;
}
/// Functions for working with tombstones in the catalog
@@ -898,7 +895,6 @@ pub(crate) mod test_helpers {
use super::*;
use ::test_helpers::{assert_contains, tracing::TracingCapture};
use assert_matches::assert_matches;
- use chrono::Utc;
use data_types::{ColumnId, ColumnSet, CompactionLevel};
use metric::{Attributes, DurationHistogram, Metric};
use std::{
@@ -916,7 +912,6 @@ pub(crate) mod test_helpers {
test_column(Arc::clone(&catalog)).await;
test_shards(Arc::clone(&catalog)).await;
test_partition(Arc::clone(&catalog)).await;
- test_partition_flag_for_delete(Arc::clone(&catalog)).await;
test_tombstone(Arc::clone(&catalog)).await;
test_tombstones_by_parquet_file(Arc::clone(&catalog)).await;
test_parquet_file(Arc::clone(&catalog)).await;
@@ -1694,97 +1689,6 @@ pub(crate) mod test_helpers {
assert_eq!(recent, recent2);
}
- // This test must set partition key as a string of date "YYY-MM-DD"
- async fn test_partition_flag_for_delete(catalog: Arc<dyn Catalog>) {
- let mut repos = catalog.repositories().await;
- let topic = repos.topics().create_or_get("foo").await.unwrap();
- let pool = repos.query_pools().create_or_get("foo").await.unwrap();
- let namespace = repos
- .namespaces()
- .create(
- "namespace_partition_test_flag_for_delete",
- "inf",
- topic.id,
- pool.id,
- )
- .await
- .unwrap();
- let table = repos
- .tables()
- .create_or_get("test_table", namespace.id)
- .await
- .unwrap();
- let shard = repos
- .shards()
- .create_or_get(&topic, ShardIndex::new(1))
- .await
- .unwrap();
-
- let mut created = BTreeMap::new();
-
- // Test flagged for deletion
- // 1. No retention period set, nothing should be flagged for deletion
- let ids = repos
- .partitions()
- .flag_for_delete_by_retention()
- .await
- .unwrap();
- assert!(ids.is_empty());
- //
- // 2. set ns retention period to one hour then create 2 partitions: today and 2 days ago
- // today partition should not be flagged for deletion
- repos
- .namespaces()
- .update_retention_period(&namespace.name, 1) // 1 hour
- .await
- .unwrap();
- let today = Utc::now();
- let two_days_ago = today - chrono::Duration::days(2);
- // date to string "YYYY-MM-DD"
- let today_str = today.format("%Y-%m-%d").to_string();
- let two_days_ago_str = two_days_ago.format("%Y-%m-%d").to_string();
-
- let partition_today = repos
- .partitions()
- .create_or_get(today_str.into(), shard.id, table.id)
- .await
- .expect("failed to create partition");
- created.insert(partition_today.id, partition_today.clone());
-
- let partition_two_days_ago = repos
- .partitions()
- .create_or_get(two_days_ago_str.into(), shard.id, table.id)
- .await
- .expect("failed to create partition");
- created.insert(partition_two_days_ago.id, partition_two_days_ago.clone());
- // Should have at least one partition deleted
- let ids = repos
- .partitions()
- .flag_for_delete_by_retention()
- .await
- .unwrap();
- assert!(!ids.is_empty());
- // two_days_ago partition should be flagged for deletion
- assert!(ids.contains(&partition_two_days_ago.id));
- // today partition should not be flagged for deletion
- assert!(!ids.contains(&partition_today.id));
- //
- // 3. flag for select again and should not get anything returned because the partitions are already flagged for deletion
- let ids = repos
- .partitions()
- .flag_for_delete_by_retention()
- .await
- .unwrap();
- assert!(ids.is_empty());
-
- // Reset retention period to infinite so it won't affect following tests
- repos
- .namespaces()
- .update_retention_period(&namespace.name, 0) // infinite
- .await
- .unwrap();
- }
-
async fn test_tombstone(catalog: Arc<dyn Catalog>) {
let mut repos = catalog.repositories().await;
let topic = repos.topics().create_or_get("foo").await.unwrap();
@@ -2669,13 +2573,6 @@ pub(crate) mod test_helpers {
.await
.unwrap();
assert!(ids.is_empty());
-
- // Reset retention period to infinite so it won't affect following tests
- repos
- .namespaces()
- .update_retention_period(&namespace.name, 0) // infinite
- .await
- .unwrap();
}
async fn test_parquet_file_compaction_level_0(catalog: Arc<dyn Catalog>) {
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index b26947d072..7f1d3702a9 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -12,7 +12,6 @@ use crate::{
DEFAULT_MAX_COLUMNS_PER_TABLE, DEFAULT_MAX_TABLES, DEFAULT_RETENTION_PERIOD,
};
use async_trait::async_trait;
-use chrono::{DateTime, NaiveDate, Utc};
use data_types::{
Column, ColumnId, ColumnType, ColumnTypeCount, CompactionLevel, Namespace, NamespaceId,
ParquetFile, ParquetFileId, ParquetFileParams, Partition, PartitionId, PartitionKey,
@@ -20,7 +19,7 @@ use data_types::{
ShardIndex, SkippedCompaction, Table, TableId, TablePartition, Timestamp, Tombstone,
TombstoneId, TopicId, TopicMetadata,
};
-use iox_time::{SystemProvider, Time, TimeProvider};
+use iox_time::{SystemProvider, TimeProvider};
use observability_deps::tracing::warn;
use snafu::ensure;
use sqlx::types::Uuid;
@@ -767,7 +766,6 @@ impl PartitionRepo for MemTxn {
partition_key: key,
sort_key: vec![],
persisted_sequence_number: None,
- to_delete: None,
};
stage.partitions.push(p);
stage.partitions.last().unwrap()
@@ -933,57 +931,6 @@ impl PartitionRepo for MemTxn {
.cloned()
.collect())
}
-
- async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<PartitionId>> {
- let now = Timestamp::from(self.time_provider.now());
- let stage = self.stage();
-
- Ok(stage
- .partitions
- .iter_mut()
- // don't flag if it is already flagged
- .filter(|p| p.to_delete.is_none())
- .filter_map(|p| {
- stage
- .tables
- .iter()
- .find(|t| t.id == p.table_id)
- .and_then(|t| {
- stage
- .namespaces
- .iter()
- .find(|n| n.id == t.namespace_id)
- .and_then(|ns| {
- ns.retention_period_ns.and_then(|retention_ns| {
- let partition_key_ns = NaiveDate::parse_from_str(
- &p.partition_key.to_string(),
- "%Y-%m-%d",
- )
- .ok()
- .map(|naive_date| naive_date.and_hms(0, 0, 0))
- .map(|naive_date_time| {
- DateTime::<Utc>::from_utc(naive_date_time, Utc)
- })
- .map(|date_time_utc| {
- Timestamp::from(Time::from_date_time(date_time_utc))
- });
-
- partition_key_ns.and_then(|partition_key_ns| {
- // Partition_key_ns is the start of the day. Need to add a day to it so we compare end of the day
- let day_ns = 24 * 60 * 60 * 1_000_000_000;
- if partition_key_ns + day_ns < now - retention_ns {
- p.to_delete = Some(now);
- Some(p.id)
- } else {
- None
- }
- })
- })
- })
- })
- })
- .collect())
- }
}
#[async_trait]
diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs
index 7a73c26f8a..515b841f90 100644
--- a/iox_catalog/src/metrics.rs
+++ b/iox_catalog/src/metrics.rs
@@ -251,7 +251,6 @@ decorate!(
"partition_delete_skipped_compactions" = delete_skipped_compactions(&mut self, partition_id: PartitionId) -> Result<Option<SkippedCompaction>>;
"partition_update_persisted_sequence_number" = update_persisted_sequence_number(&mut self, partition_id: PartitionId, sequence_number: SequenceNumber) -> Result<()>;
"partition_most_recent_n" = most_recent_n(&mut self, n: usize, shards: &[ShardId]) -> Result<Vec<Partition>>;
- "partition_flag_for_delete_by_retention" = flag_for_delete_by_retention(&mut self) -> Result<Vec<PartitionId>>;
]
);
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index d5361c3aa6..1e0bac2a77 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -1377,30 +1377,6 @@ WHERE id = $2;
.await
.map_err(|e| Error::SqlxError { source: e })
}
-
- async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<PartitionId>> {
- let flagged_at = Timestamp::from(self.time_provider.now());
- let flagged = sqlx::query(
- r#"
- UPDATE partition p
- SET to_delete = $1
- FROM table_name t, namespace n
- WHERE p.to_delete IS NULL AND
- t.id = p.table_id AND
- n.id = t.namespace_id AND
- n.retention_period_ns IS NOT NULL AND
- extract(epoch from (to_date(p.partition_key, 'YYYY-MM-DD') + interval '1 day')) * 1000000000 < $1 - n.retention_period_ns
- RETURNING p.id;
- "#,
- )
- .bind(flagged_at) // $1
- .fetch_all(&mut self.inner)
- .await
- .map_err(|e| Error::SqlxError { source: e })?;
-
- let flagged = flagged.into_iter().map(|row| row.get("id")).collect();
- Ok(flagged)
- }
}
#[async_trait]
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 8418764b57..38de483208 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -22,7 +22,7 @@ base64 = { version = "0.13", features = ["std"] }
bitflags = { version = "1" }
byteorder = { version = "1", features = ["std"] }
bytes = { version = "1", features = ["std"] }
-chrono = { version = "0.4", features = ["alloc", "clock", "iana-time-zone", "js-sys", "oldtime", "serde", "std", "time", "wasm-bindgen", "wasmbind", "winapi"] }
+chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "dd081d64a2fba8574e63bdd0662c14aec5852b48", features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
@@ -160,14 +160,14 @@ ahash-c38e5c1d305a1b54 = { package = "ahash", version = "0.8", default-features
once_cell = { version = "1", default-features = false, features = ["unstable"] }
scopeguard = { version = "1", features = ["use_std"] }
tokio = { version = "1", default-features = false, features = ["winapi"] }
-winapi = { version = "0.3", default-features = false, features = ["accctrl", "aclapi", "activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "namedpipeapi", "ntdef", "ntsecapi", "ntstatus", "objbase", "processenv", "profileapi", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "sysinfoapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
+winapi = { version = "0.3", default-features = false, features = ["accctrl", "aclapi", "activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "namedpipeapi", "ntsecapi", "ntstatus", "objbase", "processenv", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
windows-sys = { version = "0.42", features = ["Win32", "Win32_Foundation", "Win32_Networking", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage", "Win32_Storage_FileSystem", "Win32_System", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_WindowsProgramming"] }
[target.x86_64-pc-windows-msvc.build-dependencies]
once_cell = { version = "1", default-features = false, features = ["unstable"] }
scopeguard = { version = "1", features = ["use_std"] }
tokio = { version = "1", default-features = false, features = ["winapi"] }
-winapi = { version = "0.3", default-features = false, features = ["accctrl", "aclapi", "activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "namedpipeapi", "ntdef", "ntsecapi", "ntstatus", "objbase", "processenv", "profileapi", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "sysinfoapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
+winapi = { version = "0.3", default-features = false, features = ["accctrl", "aclapi", "activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "namedpipeapi", "ntsecapi", "ntstatus", "objbase", "processenv", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
windows-sys = { version = "0.42", features = ["Win32", "Win32_Foundation", "Win32_Networking", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage", "Win32_Storage_FileSystem", "Win32_System", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_WindowsProgramming"] }
### END HAKARI SECTION
|
8002d34fa2b5170e66a26c365217dfdca977a336
|
Marco Neumann
|
2023-02-27 11:27:48
|
add "split dedup by partition" optimizer rule (#7020)
|
* feat: add "split dedup by partition" optimizer rule
- some additional testing infra
- includes config infra for optimizer passes
- not wired up yet since we still use the old plan generation
For #6098.
* refactor: change default and improve docs
| null |
feat: add "split dedup by partition" optimizer rule (#7020)
* feat: add "split dedup by partition" optimizer rule
- some additional testing infra
- includes config infra for optimizer passes
- not wired up yet since we still use the old plan generation
For #6098.
* refactor: change default and improve docs
|
diff --git a/iox_query/src/config.rs b/iox_query/src/config.rs
new file mode 100644
index 0000000000..608006927c
--- /dev/null
+++ b/iox_query/src/config.rs
@@ -0,0 +1,99 @@
+use datafusion::config::ConfigExtension;
+
+/// IOx-specific config extension prefix.
+pub const IOX_CONFIG_PREFIX: &str = "iox";
+
+macro_rules! cfg {
+ (
+ $(#[doc = $struct_d:tt])*
+ $vis:vis struct $struct_name:ident {
+ $(
+ $(#[doc = $d:tt])*
+ $field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
+ )*$(,)*
+ }
+ ) => {
+ $(#[doc = $struct_d])*
+ #[derive(Debug, Clone)]
+ #[non_exhaustive]
+ $vis struct $struct_name{
+ $(
+ $(#[doc = $d])*
+ $field_vis $field_name : $field_type,
+ )*
+ }
+
+ impl Default for $struct_name {
+ fn default() -> Self {
+ Self {
+ $($field_name: $default),*
+ }
+ }
+ }
+
+ impl ::datafusion::config::ExtensionOptions for $struct_name {
+ fn as_any(&self) -> &dyn ::std::any::Any {
+ self
+ }
+
+ fn as_any_mut(&mut self) -> &mut dyn ::std::any::Any {
+ self
+ }
+
+ fn cloned(&self) -> Box<dyn ::datafusion::config::ExtensionOptions> {
+ Box::new(self.clone())
+ }
+
+ fn set(&mut self, key: &str, value: &str) -> ::datafusion::error::Result<()> {
+ match key {
+ $(
+ stringify!($field_name) => {
+ self.$field_name = value.parse().map_err(|e| {
+ ::datafusion::error::DataFusionError::Context(
+ format!(concat!("Error parsing {} as ", stringify!($t),), value),
+ Box::new(::datafusion::error::DataFusionError::External(Box::new(e))),
+ )
+ })?;
+ Ok(())
+ }
+ )*
+ _ => Err(::datafusion::error::DataFusionError::Internal(
+ format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
+ ))
+ }
+ }
+
+ fn entries(&self) -> Vec<::datafusion::config::ConfigEntry> {
+ vec![
+ $(
+ ::datafusion::config::ConfigEntry {
+ key: stringify!($field_name).to_owned(),
+ value: (self.$field_name != $default).then(|| self.$field_name.to_string()),
+ description: concat!($($d),*).trim(),
+ },
+ )*
+ ]
+ }
+ }
+ }
+}
+
+cfg! {
+ /// Config options for IOx.
+ pub struct IoxConfigExt {
+ /// When splitting de-duplicate operations based on IOx partitions[^iox_part], this is the maximum number of IOx
+ /// partitions that should be considered. If there are more partitions, the split will NOT be performed.
+ ///
+ /// This protects against certain highly degenerative plans.
+ ///
+ ///
+ /// [^iox_part]: "IOx partition" refers to a partition within the IOx catalog, i.e. a partition within the
+ /// primary key space. This is NOT the same as a DataFusion partition which refers to a stream
+ /// within the physical plan data flow.
+ pub max_dedup_partition_split: usize, default = 100
+ }
+}
+
+impl ConfigExtension for IoxConfigExt {
+ const PREFIX: &'static str = IOX_CONFIG_PREFIX;
+}
diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs
index 8489966249..ba28635d9c 100644
--- a/iox_query/src/lib.rs
+++ b/iox_query/src/lib.rs
@@ -25,6 +25,7 @@ use schema::{
};
use std::{any::Any, collections::BTreeSet, fmt::Debug, iter::FromIterator, sync::Arc};
+pub mod config;
pub mod exec;
pub mod frontend;
pub mod logical_optimizer;
diff --git a/iox_query/src/physical_optimizer/chunk_extraction.rs b/iox_query/src/physical_optimizer/chunk_extraction.rs
index ccf4bdb910..ad4ed92c3f 100644
--- a/iox_query/src/physical_optimizer/chunk_extraction.rs
+++ b/iox_query/src/physical_optimizer/chunk_extraction.rs
@@ -20,7 +20,6 @@ use crate::{
/// additional nodes (like de-duplication, filtering, projection) then NO data will be returned.
///
/// [`chunks_to_physical_nodes`]: crate::provider::chunks_to_physical_nodes
-#[allow(dead_code)]
pub fn extract_chunks(plan: &dyn ExecutionPlan) -> Option<(Schema, Vec<Arc<dyn QueryChunk>>)> {
let mut visitor = ExtractChunksVisitor::default();
visit_execution_plan(plan, &mut visitor).ok()?;
diff --git a/iox_query/src/physical_optimizer/dedup/mod.rs b/iox_query/src/physical_optimizer/dedup/mod.rs
new file mode 100644
index 0000000000..af30321407
--- /dev/null
+++ b/iox_query/src/physical_optimizer/dedup/mod.rs
@@ -0,0 +1,6 @@
+//! Optimizer passes concering de-duplication.
+
+mod partition_split;
+
+#[cfg(test)]
+mod test_util;
diff --git a/iox_query/src/physical_optimizer/dedup/partition_split.rs b/iox_query/src/physical_optimizer/dedup/partition_split.rs
new file mode 100644
index 0000000000..9b8708a327
--- /dev/null
+++ b/iox_query/src/physical_optimizer/dedup/partition_split.rs
@@ -0,0 +1,232 @@
+use std::sync::Arc;
+
+use data_types::PartitionId;
+use datafusion::{
+ config::ConfigOptions,
+ error::Result,
+ physical_optimizer::PhysicalOptimizerRule,
+ physical_plan::{rewrite::TreeNodeRewritable, union::UnionExec, ExecutionPlan},
+};
+use hashbrown::HashMap;
+use predicate::Predicate;
+
+use crate::{
+ config::IoxConfigExt,
+ physical_optimizer::chunk_extraction::extract_chunks,
+ provider::{chunks_to_physical_nodes, DeduplicateExec},
+ QueryChunk,
+};
+
+/// Split de-duplication operations based on partitons.
+///
+/// This should usually be more cost-efficient.
+#[derive(Debug, Default)]
+pub struct PartitionSplit;
+
+impl PhysicalOptimizerRule for PartitionSplit {
+ fn optimize(
+ &self,
+ plan: Arc<dyn ExecutionPlan>,
+ config: &ConfigOptions,
+ ) -> Result<Arc<dyn ExecutionPlan>> {
+ plan.transform_up(&|plan| {
+ let plan_any = plan.as_any();
+
+ if let Some(dedup_exec) = plan_any.downcast_ref::<DeduplicateExec>() {
+ let mut children = dedup_exec.children();
+ assert_eq!(children.len(), 1);
+ let child = children.remove(0);
+ let Some((schema, chunks)) = extract_chunks(child.as_ref()) else {
+ return Ok(None);
+ };
+
+ let mut chunks_by_partition: HashMap<PartitionId, Vec<Arc<dyn QueryChunk>>> =
+ Default::default();
+ for chunk in chunks {
+ chunks_by_partition
+ .entry(chunk.partition_id())
+ .or_default()
+ .push(chunk);
+ }
+
+ // If there not multiple partitions (0 or 1), then this optimizer is a no-op. Signal that to the
+ // optimizer framework.
+ if chunks_by_partition.len() < 2 {
+ return Ok(None);
+ }
+
+ // Protect against degenerative plans
+ if chunks_by_partition.len()
+ > config
+ .extensions
+ .get::<IoxConfigExt>()
+ .cloned()
+ .unwrap_or_default()
+ .max_dedup_partition_split
+ {
+ return Ok(None);
+ }
+
+ // ensure deterministic order
+ let mut chunks_by_partition = chunks_by_partition.into_iter().collect::<Vec<_>>();
+ chunks_by_partition.sort_by_key(|(p_id, _chunks)| *p_id);
+
+ let out = UnionExec::new(
+ chunks_by_partition
+ .into_iter()
+ .map(|(_p_id, chunks)| {
+ Arc::new(DeduplicateExec::new(
+ chunks_to_physical_nodes(
+ &schema,
+ None,
+ chunks,
+ Predicate::new(),
+ config.execution.target_partitions,
+ ),
+ dedup_exec.sort_keys().to_vec(),
+ )) as _
+ })
+ .collect(),
+ );
+ return Ok(Some(Arc::new(out)));
+ }
+
+ Ok(None)
+ })
+ }
+
+ fn name(&self) -> &str {
+ "partition_split"
+ }
+
+ fn schema_check(&self) -> bool {
+ true
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ physical_optimizer::{
+ dedup::test_util::{chunk, dedup_plan},
+ test_util::OptimizationTest,
+ },
+ QueryChunkMeta,
+ };
+
+ use super::*;
+
+ #[test]
+ fn test_no_chunks() {
+ let schema = chunk(1).schema().clone();
+ let plan = dedup_plan(schema, vec![]);
+ let opt = PartitionSplit::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " EmptyExec: produce_one_row=false"
+ output:
+ Ok:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " EmptyExec: produce_one_row=false"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_same_partition() {
+ let chunk1 = chunk(1);
+ let chunk2 = chunk(2);
+ let chunk3 = chunk(3).with_dummy_parquet_file();
+ let schema = chunk1.schema().clone();
+ let plan = dedup_plan(schema, vec![chunk1, chunk2, chunk3]);
+ let opt = PartitionSplit::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=2 batches=0 total_rows=0"
+ - " ParquetExec: limit=None, partitions={1 group: [[3.parquet]]}, projection=[field, tag1, tag2, time]"
+ output:
+ Ok:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=2 batches=0 total_rows=0"
+ - " ParquetExec: limit=None, partitions={1 group: [[3.parquet]]}, projection=[field, tag1, tag2, time]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_different_partitions() {
+ let chunk1 = chunk(1).with_partition_id(1);
+ let chunk2 = chunk(2).with_partition_id(2);
+ // use at least 3 parquet files for one of the two partitions to validate that `target_partitions` is forwared correctly
+ let chunk3 = chunk(3).with_dummy_parquet_file().with_partition_id(1);
+ let chunk4 = chunk(4).with_dummy_parquet_file().with_partition_id(2);
+ let chunk5 = chunk(5).with_dummy_parquet_file().with_partition_id(1);
+ let chunk6 = chunk(6).with_dummy_parquet_file().with_partition_id(1);
+ let schema = chunk1.schema().clone();
+ let plan = dedup_plan(schema, vec![chunk1, chunk2, chunk3, chunk4, chunk5, chunk6]);
+ let opt = PartitionSplit::default();
+ let mut config = ConfigOptions::default();
+ config.execution.target_partitions = 2;
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new_with_config(plan, opt, &config),
+ @r###"
+ ---
+ input:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=2 batches=0 total_rows=0"
+ - " ParquetExec: limit=None, partitions={2 groups: [[3.parquet, 5.parquet], [4.parquet, 6.parquet]]}, projection=[field, tag1, tag2, time]"
+ output:
+ Ok:
+ - " UnionExec"
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0"
+ - " ParquetExec: limit=None, partitions={2 groups: [[3.parquet, 6.parquet], [5.parquet]]}, projection=[field, tag1, tag2, time]"
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0"
+ - " ParquetExec: limit=None, partitions={1 group: [[4.parquet]]}, projection=[field, tag1, tag2, time]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_max_split() {
+ let chunk1 = chunk(1).with_partition_id(1);
+ let chunk2 = chunk(2).with_partition_id(2);
+ let chunk3 = chunk(3).with_partition_id(3);
+ let schema = chunk1.schema().clone();
+ let plan = dedup_plan(schema, vec![chunk1, chunk2, chunk3]);
+ let opt = PartitionSplit::default();
+ let mut config = ConfigOptions::default();
+ config.extensions.insert(IoxConfigExt {
+ max_dedup_partition_split: 2,
+ });
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new_with_config(plan, opt, &config),
+ @r###"
+ ---
+ input:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=3 batches=0 total_rows=0"
+ output:
+ Ok:
+ - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=3 batches=0 total_rows=0"
+ "###
+ );
+ }
+}
diff --git a/iox_query/src/physical_optimizer/dedup/test_util.rs b/iox_query/src/physical_optimizer/dedup/test_util.rs
new file mode 100644
index 0000000000..43a7283e5c
--- /dev/null
+++ b/iox_query/src/physical_optimizer/dedup/test_util.rs
@@ -0,0 +1,36 @@
+use std::sync::Arc;
+
+use datafusion::physical_plan::ExecutionPlan;
+use predicate::Predicate;
+use schema::{sort::SortKeyBuilder, Schema, TIME_COLUMN_NAME};
+
+use crate::{
+ provider::{chunks_to_physical_nodes, DeduplicateExec},
+ test::TestChunk,
+ util::arrow_sort_key_exprs,
+ QueryChunk,
+};
+
+pub fn dedup_plan(schema: Schema, chunks: Vec<TestChunk>) -> Arc<dyn ExecutionPlan> {
+ let chunks = chunks
+ .into_iter()
+ .map(|c| Arc::new(c) as _)
+ .collect::<Vec<Arc<dyn QueryChunk>>>();
+ let plan = chunks_to_physical_nodes(&schema, None, chunks, Predicate::new(), 2);
+ let sort_key = SortKeyBuilder::new()
+ .with_col("tag1")
+ .with_col("tag2")
+ .with_col(TIME_COLUMN_NAME)
+ .build();
+ let sort_exprs = arrow_sort_key_exprs(&sort_key, &schema.as_arrow());
+ Arc::new(DeduplicateExec::new(plan, sort_exprs))
+}
+
+pub fn chunk(id: u128) -> TestChunk {
+ TestChunk::new("table")
+ .with_id(id)
+ .with_tag_column("tag1")
+ .with_tag_column("tag2")
+ .with_i64_field_column("field")
+ .with_time_column()
+}
diff --git a/iox_query/src/physical_optimizer/mod.rs b/iox_query/src/physical_optimizer/mod.rs
index 346c5a0454..203f370331 100644
--- a/iox_query/src/physical_optimizer/mod.rs
+++ b/iox_query/src/physical_optimizer/mod.rs
@@ -5,6 +5,7 @@ use datafusion::{execution::context::SessionState, physical_optimizer::PhysicalO
use self::union::one_union::OneUnion;
mod chunk_extraction;
+mod dedup;
mod union;
#[cfg(test)]
diff --git a/iox_query/src/physical_optimizer/test_util.rs b/iox_query/src/physical_optimizer/test_util.rs
index 447635b5b0..ae00c9173b 100644
--- a/iox_query/src/physical_optimizer/test_util.rs
+++ b/iox_query/src/physical_optimizer/test_util.rs
@@ -15,13 +15,24 @@ pub struct OptimizationTest {
impl OptimizationTest {
pub fn new<O>(input_plan: Arc<dyn ExecutionPlan>, opt: O) -> Self
+ where
+ O: PhysicalOptimizerRule,
+ {
+ Self::new_with_config(input_plan, opt, &ConfigOptions::default())
+ }
+
+ pub fn new_with_config<O>(
+ input_plan: Arc<dyn ExecutionPlan>,
+ opt: O,
+ config: &ConfigOptions,
+ ) -> Self
where
O: PhysicalOptimizerRule,
{
Self {
input: format_execution_plan(&input_plan),
output: opt
- .optimize(input_plan, &ConfigOptions::default())
+ .optimize(input_plan, config)
.map(|plan| format_execution_plan(&plan))
.map_err(|e| e.to_string()),
}
diff --git a/iox_query/src/provider/deduplicate.rs b/iox_query/src/provider/deduplicate.rs
index a163e0153f..d31052ead3 100644
--- a/iox_query/src/provider/deduplicate.rs
+++ b/iox_query/src/provider/deduplicate.rs
@@ -119,6 +119,10 @@ impl DeduplicateExec {
metrics: ExecutionPlanMetricsSet::new(),
}
}
+
+ pub fn sort_keys(&self) -> &[PhysicalSortExpr] {
+ &self.sort_keys
+ }
}
#[derive(Debug)]
|
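The `cfg!` macro in the diff above generates a DataFusion `ExtensionOptions` implementation, so the partition-split limit rides along in the session's `ConfigOptions`. A minimal sketch of setting the extension and reading it back, mirroring the calls used in the diff's rule and tests; the limit of 2 is an arbitrary illustrative value, and `IoxConfigExt` is `#[non_exhaustive]`, so the sketch starts from the default and mutates the field.

```rust
use datafusion::config::ConfigOptions;
use iox_query::config::IoxConfigExt;

fn main() {
    // Install the IOx extension with a lowered split limit.
    let mut config = ConfigOptions::default();
    let mut iox_cfg = IoxConfigExt::default();
    iox_cfg.max_dedup_partition_split = 2;
    config.extensions.insert(iox_cfg);

    // This is how the `PartitionSplit` rule reads the limit back, falling
    // back to the default (100) when the extension was never registered.
    let limit = config
        .extensions
        .get::<IoxConfigExt>()
        .cloned()
        .unwrap_or_default()
        .max_dedup_partition_split;
    assert_eq!(limit, 2);
}
```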
95732fe221e38e1c48c0081eed5ba42862b63eee
|
Jason Stirnaman
|
2023-03-28 06:26:40
|
Update gRPC example in README (#7345)
|
Given that `ManagementService` no longer exists, the example is misleading and returns an error.
- Replace `ManagementService` example with `NamespaceService` examples.
- Revise instruction for providing `.proto` files and using the wrapper script.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
docs: Update gRPC example in README (#7345)
Given that `ManagementService` no longer exists, the example is misleading and returns an error.
- Replace `ManagementService` example with `NamespaceService` examples.
- Revise instruction for providing `.proto` files and using the wrapper script.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/README.md b/README.md
index 7e6aaf52d8..ea1341959b 100644
--- a/README.md
+++ b/README.md
@@ -303,20 +303,63 @@ status: SERVING
### Manually call the gRPC API
To manually invoke one of the gRPC APIs, use a gRPC CLI client such as [grpcurl](https://github.com/fullstorydev/grpcurl).
+Because the gRPC server library in IOx doesn't provide service reflection, you need to pass the IOx `.proto` files to your client
+when making requests.
+After you install **grpcurl**, you can use the `./scripts/grpcurl` wrapper script to make requests that pass the `.proto` files for you--for example:
-Tonic (the gRPC server library we're using) currently doesn't have support for gRPC reflection, hence you must pass all `.proto` files to your client.
-You can find a convenient `grpcurl` wrapper that does that in the `scripts` directory:
+Use the `list` command to list gRPC API services:
```console
-$ ./scripts/grpcurl -plaintext 127.0.0.1:8082 list
+./scripts/grpcurl -plaintext 127.0.0.1:8082 list
+```
+
+```console
+google.longrunning.Operations
grpc.health.v1.Health
-influxdata.iox.management.v1.ManagementService
+influxdata.iox.authz.v1.IoxAuthorizerService
+influxdata.iox.catalog.v1.CatalogService
+influxdata.iox.compactor.v1.CompactionService
+influxdata.iox.delete.v1.DeleteService
+influxdata.iox.ingester.v1.PartitionBufferService
+influxdata.iox.ingester.v1.PersistService
+influxdata.iox.ingester.v1.ReplicationService
+influxdata.iox.ingester.v1.WriteInfoService
+influxdata.iox.ingester.v1.WriteService
+influxdata.iox.namespace.v1.NamespaceService
+influxdata.iox.object_store.v1.ObjectStoreService
+influxdata.iox.schema.v1.SchemaService
+influxdata.iox.sharder.v1.ShardService
influxdata.platform.storage.IOxTesting
influxdata.platform.storage.Storage
-$ ./scripts/grpcurl -plaintext 127.0.0.1:8082 influxdata.iox.management.v1.ManagementService.ListDatabases
+```
+
+Use the `describe` command to view methods for a service:
+
+```console
+./scripts/grpcurl -plaintext 127.0.0.1:8082 describe influxdata.iox.namespace.v1.NamespaceService
+```
+
+```console
+service NamespaceService {
+ ...
+ rpc GetNamespaces ( .influxdata.iox.namespace.v1.GetNamespacesRequest ) returns ( .influxdata.iox.namespace.v1.GetNamespacesResponse );
+ ...
+}
+```
+
+Invoke a method:
+
+```console
+./scripts/grpcurl -plaintext 127.0.0.1:8082 influxdata.iox.namespace.v1.NamespaceService.GetNamespaces
+```
+
+```console
{
- "names": [
- "foobar_weather"
+ "namespaces": [
+ {
+ "id": "1",
+ "name": "company_sensors"
+ }
]
}
```
|
231e0f48ab82e0705cc97db3a2c4e445643f8c55
|
Marco Neumann
|
2023-05-03 14:46:51
|
add test for InfluxQL md queries w/ `FROM ""` (#7728)
|
See https://github.com/influxdata/idpe/issues/17559 .
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
test: add test for InfluxQL md queries w/ `FROM ""` (#7728)
See https://github.com/influxdata/idpe/issues/17559 .
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql
index c8a44f8033..0cdf0f0048 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql
+++ b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql
@@ -36,6 +36,7 @@ SHOW FIELD KEYS FROM cpu,disk,cpu;
SHOW FIELD KEYS FROM /m.*/;
SHOW FIELD KEYS FROM /d\isk/;
SHOW FIELD KEYS FROM does_not_exist;
+SHOW FIELD KEYS FROM "";
-- unimplemented features in `SHOW FIELD KEYS`
SHOW FIELD KEYS ON my_db;
@@ -59,6 +60,7 @@ SHOW TAG VALUES FROM m1,m0,m1 WITH KEY = "tag0";
SHOW TAG VALUES FROM /m.*/ WITH KEY = "tag0";
SHOW TAG VALUES FROM /d\isk/ WITH KEY = "device";
SHOW TAG VALUES FROM does_not_exist WITH KEY = "tag0";
+SHOW TAG VALUES FROM "" WITH KEY = "tag0";
SHOW TAG VALUES WITH KEY = "tt_tag";
SHOW TAG VALUES WITH KEY = "tt_tag" WHERE time >= '1990-01-01T00:00:00Z';
SHOW TAG VALUES WITH KEY = "tt_tag" WHERE time >= '2022-10-31T02:00:00Z';
@@ -83,6 +85,7 @@ SHOW TAG KEYS FROM cpu,disk,cpu;
SHOW TAG KEYS FROM /m.*/;
SHOW TAG KEYS FROM /d\isk/;
SHOW TAG KEYS FROM does_not_exist;
+SHOW TAG KEYS FROM "";
SHOW TAG KEYS FROM time_test WHERE time >= '1990-01-01T00:00:00Z';
SHOW TAG KEYS FROM time_test WHERE time >= '2022-10-31T02:00:00Z';
SHOW TAG KEYS FROM time_test WHERE time >= '1970-01-01T01:00:00Z';
diff --git a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
index 1351c6e3ec..015ee6c829 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
+++ b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
@@ -408,6 +408,11 @@ name: disk
| fieldKey | fieldType |
+----------+-----------+
+----------+-----------+
+-- InfluxQL: SHOW FIELD KEYS FROM "";
++----------+-----------+
+| fieldKey | fieldType |
++----------+-----------+
++----------+-----------+
-- InfluxQL: SHOW FIELD KEYS ON my_db;
Error while planning query: This feature is not implemented: SHOW FIELD KEYS ON <database>
-- InfluxQL: SHOW FIELD KEYS FROM x.my_db;
@@ -923,6 +928,11 @@ name: disk
| key | value |
+-----+-------+
+-----+-------+
+-- InfluxQL: SHOW TAG VALUES FROM "" WITH KEY = "tag0";
++-----+-------+
+| key | value |
++-----+-------+
++-----+-------+
-- InfluxQL: SHOW TAG VALUES WITH KEY = "tt_tag";
name: time_test
+--------+-------------------+
@@ -1284,6 +1294,11 @@ name: disk
| tagKey |
+--------+
+--------+
+-- InfluxQL: SHOW TAG KEYS FROM "";
++--------+
+| tagKey |
++--------+
++--------+
-- InfluxQL: SHOW TAG KEYS FROM time_test WHERE time >= '1990-01-01T00:00:00Z';
name: time_test
+--------------------------+
|
413635d25ab8d7a19d8eb34f7b862ec8051982e9
|
Carol (Nichols || Goulding)
|
2023-03-10 17:14:47
|
Don't add a partition to skipped_compactions if it makes progress
|
If a partition takes longer than `partition_timeout` to compact, but it
did make _some_ progress, let the compactor try that partition again at
a later time so that compaction for the partition will eventually
complete.
If a partition times out and _no_ progress has been made, then still add
it to the skipped_compactions table because it's either too big to ever
compact or is otherwise stuck.
Closes influxdata/idpe#17234.
| null |
feat: Don't add a partition to skipped_compactions if it makes progress
If a partition takes longer than `partition_timeout` to compact, but it
did make _some_ progress, let the compactor try that partition again at
a later time so that compaction for the partition will eventually
complete.
If a partition times out and _no_ progress has been made, then still add
it to the skipped_compactions table because it's either too big to ever
compact or is otherwise stuck.
Closes influxdata/idpe#17234.
|
diff --git a/Cargo.lock b/Cargo.lock
index a0a661f7db..79944f0bd3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -975,6 +975,7 @@ name = "compactor2"
version = "0.1.0"
dependencies = [
"arrow_util",
+ "assert_matches",
"async-trait",
"backoff",
"bytes",
diff --git a/compactor2/Cargo.toml b/compactor2/Cargo.toml
index c0149292ec..8757cdd1b3 100644
--- a/compactor2/Cargo.toml
+++ b/compactor2/Cargo.toml
@@ -33,6 +33,7 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" }
[dev-dependencies]
arrow_util = { path = "../arrow_util" }
+assert_matches = "1"
compactor2_test_utils = { path = "../compactor2_test_utils" }
iox_tests = { path = "../iox_tests" }
test_helpers = { path = "../test_helpers"}
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index eef72200ae..70fa41a9ff 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -1,14 +1,15 @@
-use std::{num::NonZeroUsize, sync::Arc, time::Duration};
+use std::{fmt, future::Future, num::NonZeroUsize, sync::Arc, time::Duration};
use data_types::{CompactionLevel, ParquetFile, ParquetFileParams, PartitionId};
use futures::StreamExt;
use observability_deps::tracing::info;
use parquet_file::ParquetFilePath;
+use tokio::sync::watch::{self, Sender};
use tracker::InstrumentedAsyncSemaphore;
use crate::{
components::{scratchpad::Scratchpad, Components},
- error::DynError,
+ error::{DynError, ErrorKind, SimpleError},
file_classification::{FileToSplit, FilesToCompactOrSplit},
partition_info::PartitionInfo,
PlanIR,
@@ -49,19 +50,37 @@ async fn compact_partition(
info!(partition_id = partition_id.get(), "compact partition",);
let mut scratchpad = components.scratchpad_gen.pad();
- let res = tokio::time::timeout(
- partition_timeout,
- try_compact_partition(
- partition_id,
- job_semaphore,
- Arc::clone(&components),
- scratchpad.as_mut(),
- ),
- )
+ let res = timeout_with_progress_checking(partition_timeout, |transmit_progress_signal| {
+ let components = Arc::clone(&components);
+ async {
+ try_compact_partition(
+ partition_id,
+ job_semaphore,
+ components,
+ scratchpad.as_mut(),
+ transmit_progress_signal,
+ )
+ .await
+ }
+ })
.await;
+
let res = match res {
- Ok(res) => res,
- Err(e) => Err(Box::new(e) as _),
+ // If `try_compact_partition` timed out and didn't make any progress, something is wrong
+ // with this partition and it should get added to the `skipped_compactions` table by
+ // sending a timeout error to the `partition_done_sink`.
+ TimeoutWithProgress::NoWorkTimeOutError => Err(Box::new(SimpleError::new(
+ ErrorKind::Timeout,
+ "timeout without making any progress",
+ )) as _),
+ // If `try_compact_partition` timed out but *did* make some progress, this is fine, don't
+ // add it to the `skipped_compactions` table.
+ TimeoutWithProgress::SomeWorkTryAgain => Ok(()),
+ // If `try_compact_partition` finished before the timeout, return the `Result` that it
+ // returned. If an error was returned, there could be something wrong with the partiton;
+ // let the `partition_done_sink` decide if the error means the partition should be added
+ // to the `skipped_compactions` table or not.
+ TimeoutWithProgress::Completed(res) => res,
};
components
.partition_done_sink
@@ -163,6 +182,7 @@ async fn try_compact_partition(
job_semaphore: Arc<InstrumentedAsyncSemaphore>,
components: Arc<Components>,
scratchpad_ctx: &mut dyn Scratchpad,
+ transmit_progress_signal: Sender<bool>,
) -> Result<(), DynError> {
let mut files = components.partition_files_source.fetch(partition_id).await;
let partition_info = components.partition_info_source.fetch(partition_id).await?;
@@ -257,6 +277,13 @@ async fn try_compact_partition(
}
files = files_next;
+
+ // Report to `timeout_with_progress_checking` that some progress has been made; stop
+ // if sending this signal fails because something has gone terribly wrong for the other
+ // end of the channel to not be listening anymore.
+ if let Err(e) = transmit_progress_signal.send(true) {
+ return Err(Box::new(e));
+ }
}
}
@@ -507,3 +534,140 @@ async fn update_catalog(
(created_file_params, upgraded_files)
}
+
+/// Returned information from a call to [`timeout_with_progress_checking`].
+enum TimeoutWithProgress<R> {
+ /// The inner future timed out and _no_ progress was reported.
+ NoWorkTimeOutError,
+ /// The inner future timed out and _some_ progress was reported.
+ SomeWorkTryAgain,
+ /// The inner future completed before the timeout and returned a value of type `R`.
+ Completed(R),
+}
+
+impl<R: fmt::Debug> fmt::Debug for TimeoutWithProgress<R> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::NoWorkTimeOutError => write!(f, "TimeoutWithProgress::NoWorkTimeOutError"),
+ Self::SomeWorkTryAgain => write!(f, "TimeoutWithProgress::SomeWorkTryAgain"),
+ Self::Completed(r) => write!(f, "TimeoutWithProgress::Completed({:?})", r),
+ }
+ }
+}
+
+/// Set an overall timeout for a future that has some concept of making progress or not, and if the
+/// future times out, send a different [`TimeoutWithProgress`] value depending on whether there
+/// was no work done or some work done. This lets the calling code assess whether it might be worth
+/// trying the operation again to make more progress, or whether the future is somehow stuck or
+/// takes too long to ever work.
+///
+/// # Parameters
+///
+/// * `full_timeout`: The timeout duration the future is allowed to spend
+/// * `inner_future`: A function taking a [`tokio::sync::watch::Sender<bool>`] that returns a
+/// future. This function expects that the body of the future will call `send(true)` to indicate
+/// that progress has been made, however the future defines "progress". If the future times out,
+/// this function will return `TimeoutWithProgress::SomeWorkTryAgain` if it has received at least
+/// one `true` value and `TimeoutWithProgress::NoWorkTimeOutError` if nothing was sent. If the
+/// future finishes before `full_timeout`, this function will return
+/// `TimeoutWithProgress::Completed` and pass along the returned value from the future.
+async fn timeout_with_progress_checking<F, Fut>(
+ full_timeout: Duration,
+ inner_future: F,
+) -> TimeoutWithProgress<Fut::Output>
+where
+ F: FnOnce(Sender<bool>) -> Fut + Send,
+ Fut: Future + Send,
+{
+ let (transmit_progress_signal, receive_progress_signal) = watch::channel(false);
+
+ let called_inner_future = inner_future(transmit_progress_signal);
+
+ match tokio::time::timeout(full_timeout, called_inner_future).await {
+ Ok(val) => TimeoutWithProgress::Completed(val),
+ Err(_) => {
+ let progress = *receive_progress_signal.borrow();
+ if progress {
+ TimeoutWithProgress::SomeWorkTryAgain
+ } else {
+ TimeoutWithProgress::NoWorkTimeOutError
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use assert_matches::assert_matches;
+
+ #[tokio::test]
+ async fn reports_progress_completes_and_returns_ok_under_timeout() {
+ let state = timeout_with_progress_checking(Duration::from_millis(5), |tx| async move {
+ // No loop in this test; report progress and then return success to simulate
+ // successfully completing all work before the timeout.
+ let _ignore_send_errors = tx.send(true);
+ Result::<(), String>::Ok(())
+ })
+ .await;
+
+ assert_matches!(state, TimeoutWithProgress::Completed(Ok(())));
+ }
+
+ #[tokio::test]
+ async fn reports_progress_completes_and_returns_err_under_timeout() {
+ let state = timeout_with_progress_checking(Duration::from_millis(5), |tx| async move {
+ // No loop in this test; report progress and then return an error to simulate
+ // a problem occurring before the timeout.
+ let _ignore_send_errors = tx.send(true);
+ Result::<(), String>::Err(String::from("there was a problem"))
+ })
+ .await;
+
+ assert_matches!(
+ state,
+ TimeoutWithProgress::Completed(Err(e)) if e == "there was a problem"
+ );
+ }
+
+ #[tokio::test]
+ async fn doesnt_report_progress_returns_err_under_timeout() {
+ let state = timeout_with_progress_checking(Duration::from_millis(5), |tx| async move {
+ // No loop in this test; report progress and then return success to simulate
+ // successfully completing all work before the timeout.
+ let _ignore_send_errors = tx.send(true);
+ Result::<(), String>::Ok(())
+ })
+ .await;
+
+ assert_matches!(state, TimeoutWithProgress::Completed(Ok(())));
+ }
+
+ #[tokio::test]
+ async fn reports_progress_then_times_out() {
+ let state = timeout_with_progress_checking(Duration::from_millis(5), |tx| async move {
+ loop {
+ // Sleep for 2 ms, which should be able to run and report progress and then timeout
+ // because it will never complete
+ tokio::time::sleep(Duration::from_millis(2)).await;
+ let _ignore_send_errors = tx.send(true);
+ }
+ })
+ .await;
+
+ assert_matches!(state, TimeoutWithProgress::SomeWorkTryAgain);
+ }
+
+ #[tokio::test]
+ async fn doesnt_report_progress_then_times_out() {
+ let state = timeout_with_progress_checking(Duration::from_millis(5), |_tx| async move {
+ // No loop in this test; don't report progress and then sleep enough that this will
+ // time out.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ Result::<(), String>::Ok(())
+ })
+ .await;
+
+ assert_matches!(state, TimeoutWithProgress::NoWorkTimeOutError);
+ }
+}
|
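The heart of the change above is `timeout_with_progress_checking`: the compaction loop reports progress over a `tokio::sync::watch` channel, and on timeout the caller inspects the channel to choose between skipping the partition and retrying it later. A condensed, self-contained sketch of that pattern follows; the durations and the work loop are placeholders, not the compactor's real values.

```rust
use std::time::Duration;

use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(false);

    // Stand-in for `try_compact_partition`: do work in rounds and signal
    // progress after each completed round.
    let work = async move {
        for _round in 0..10 {
            tokio::time::sleep(Duration::from_millis(2)).await;
            // Ignore send errors: they only mean the caller stopped listening.
            let _ = tx.send(true);
        }
    };

    match tokio::time::timeout(Duration::from_millis(5), work).await {
        // Finished inside the budget: use its result as-is.
        Ok(()) => println!("completed before the timeout"),
        // Timed out, but at least one round finished: worth retrying later.
        Err(_) if *rx.borrow() => println!("timed out with progress: retry later"),
        // Timed out without any progress: record the partition as skipped.
        Err(_) => println!("timed out with no progress: skip the partition"),
    }
}
```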
77bfd5757993d00fb543f9f3ca5c5ae80aa996a9
|
Dom Dwyer
|
2023-09-01 17:20:28
|
typo entire
|
Fix typo.
| null |
docs: typo entire
Fix typo.
|
diff --git a/generated_types/protos/influxdata/iox/gossip/v1/compaction.proto b/generated_types/protos/influxdata/iox/gossip/v1/compaction.proto
index 936a216646..6392b836fd 100644
--- a/generated_types/protos/influxdata/iox/gossip/v1/compaction.proto
+++ b/generated_types/protos/influxdata/iox/gossip/v1/compaction.proto
@@ -9,13 +9,13 @@ import "influxdata/iox/catalog/v1/parquet_file.proto";
// This message defines the output of the compaction round - the files deleted,
// upgraded, and created. Deleted and upgraded files are addressed by their
// catalog row IDs ("parquet file ID"), while newly created files are provided
-// with their enitre metadata.
+// with their entire metadata.
//
// # Atomicity
//
// This message is atomic - it describes the output of a single compaction job
// in its entirety. It is never split into multiple messages.
-message CompactionEvent {
+message CompactionEvent {
// Files that were deleted by this compaction event, addressed by their
// parquet row ID in the catalog.
repeated int64 deleted_file_ids = 1;
|
84e29791e5c58816de8ffe1b76ca029eea91e78a
|
Dom Dwyer
|
2022-12-19 12:46:21
|
fix incomplete comment
|
Finishes the incomplete sentence that
| null |
docs: fix incomplete comment
Finishes the incomplete sentence that
|
diff --git a/ingester2/src/persist/backpressure.rs b/ingester2/src/persist/backpressure.rs
index 1a8f0084aa..d3c968e8e7 100644
--- a/ingester2/src/persist/backpressure.rs
+++ b/ingester2/src/persist/backpressure.rs
@@ -125,7 +125,8 @@ impl PersistState {
/// Reading this value is extremely cheap and can be done without
/// performance concern.
///
- /// This value is eventually consistent, with a presumption of
+ /// This value is eventually consistent, with a presumption of being visible
+ /// in a reasonable amount of time.
pub(crate) fn get(&self) -> CurrentState {
// Correctness: relaxed as reading the current state is allowed to be
// racy for performance reasons; this call should be as cheap as
|
7f52959d29d73205229f166989b727df8b558677
|
Dom Dwyer
|
2023-05-15 12:31:19
|
move column names for Schema construction
|
When converting from a ColumnsByName into a schema::Schema instance,
move the column names instead of cloning them.
| null |
perf: move column names for Schema construction
When converting from a ColumnsByName into a schema::Schema instance,
move the column names instead of cloning them.
|
diff --git a/data_types/src/columns.rs b/data_types/src/columns.rs
index 0d993c5d1e..ec120491df 100644
--- a/data_types/src/columns.rs
+++ b/data_types/src/columns.rs
@@ -126,7 +126,7 @@ impl TryFrom<ColumnsByName> for Schema {
fn try_from(value: ColumnsByName) -> Result<Self, Self::Error> {
let mut builder = SchemaBuilder::new();
- for (column_name, column_schema) in value.iter() {
+ for (column_name, column_schema) in value.into_iter() {
let t = InfluxColumnType::from(column_schema.column_type);
builder.influx_column(column_name, t);
}
|
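The one-line change above swaps a borrowing `iter()` for a consuming `into_iter()`, so each column name is moved into the schema builder instead of being cloned. A generic illustration of the same idea using plain standard-library types rather than the IOx `ColumnsByName` and `SchemaBuilder`:

```rust
use std::collections::BTreeMap;

/// Consume the map so each owned key can be moved out; `columns.iter()`
/// would yield `(&String, &u32)` and force a `name.clone()` per column.
fn collect_column_names(columns: BTreeMap<String, u32>) -> Vec<String> {
    columns.into_iter().map(|(name, _column_type)| name).collect()
}

fn main() {
    let mut columns = BTreeMap::new();
    columns.insert("tag1".to_string(), 7_u32);
    columns.insert("time".to_string(), 4_u32);
    // BTreeMap iterates in sorted key order, so "tag1" precedes "time".
    assert_eq!(collect_column_names(columns), vec!["tag1", "time"]);
}
```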
cbb7bc5901ff0da2667344ae2ac4a45d6eda8363
|
Trevor Hilton
|
2024-08-22 07:41:33
|
remove Persister trait in favour of concrete impl (#25260)
|
The Persister trait was only implemented by a single type, because the
underlying ObjectStore interface has several ways of being mocked, we
mock that instead of the Persister interface.
This commit removes the Persister trait, and moves its interface/impl
directly on a single Persister type in the persister module of the
influxdb3_write crate.
deny.toml had some incorrect field names in license.exceptions, those
were fixed from 'crate' to 'name'.
| null |
refactor: remove Persister trait in favour of concrete impl (#25260)
The Persister trait was only implemented by a single type, because the
underlying ObjectStore interface has several ways of being mocked, we
mock that instead of the Persister interface.
This commit removes the Persister trait, and moves its interface/impl
directly on a single Persister type in the persister module of the
influxdb3_write crate.
deny.toml had some incorrect field names in license.exceptions, those
were fixed from 'crate' to 'name'.
|
diff --git a/deny.toml b/deny.toml
index b35c42ee95..3ddf22d3a8 100644
--- a/deny.toml
+++ b/deny.toml
@@ -22,8 +22,8 @@ allow = [
exceptions = [
# We should probably NOT bundle CA certs but use the OS ones.
{ name = "webpki-roots", allow = ["MPL-2.0"] },
- { allow = ["Unicode-DFS-2016"], crate = "unicode-ident" },
- { allow = ["OpenSSL"], crate = "ring" },
+ { allow = ["Unicode-DFS-2016"], name = "unicode-ident" },
+ { allow = ["OpenSSL"], name = "ring" },
]
[[licenses.clarify]]
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs
index 0117ffed48..bd11da25fc 100644
--- a/influxdb3/src/commands/serve.rs
+++ b/influxdb3/src/commands/serve.rs
@@ -15,7 +15,7 @@ use influxdb3_server::{
CommonServerState,
};
use influxdb3_wal::{Level0Duration, WalConfig};
-use influxdb3_write::persister::PersisterImpl;
+use influxdb3_write::persister::Persister;
use influxdb3_write::write_buffer::WriteBufferImpl;
use iox_query::exec::{DedicatedExecutor, Executor, ExecutorConfig};
use iox_time::SystemProvider;
@@ -293,7 +293,7 @@ pub async fn command(config: Config) -> Result<()> {
let common_state =
CommonServerState::new(Arc::clone(&metrics), trace_exporter, trace_header_parser)?;
- let persister = Arc::new(PersisterImpl::new(
+ let persister = Arc::new(Persister::new(
Arc::clone(&object_store),
config.host_identifier_prefix,
));
diff --git a/influxdb3_server/src/builder.rs b/influxdb3_server/src/builder.rs
index 7a44648f07..c5cdfb6649 100644
--- a/influxdb3_server/src/builder.rs
+++ b/influxdb3_server/src/builder.rs
@@ -1,6 +1,7 @@
use std::sync::Arc;
use authz::Authorizer;
+use influxdb3_write::persister::Persister;
use tokio::net::TcpListener;
use crate::{auth::DefaultAuthorizer, http::HttpApi, CommonServerState, Server};
@@ -55,7 +56,7 @@ pub struct WithQueryExec<Q>(Arc<Q>);
#[derive(Debug)]
pub struct NoPersister;
#[derive(Debug)]
-pub struct WithPersister<P>(Arc<P>);
+pub struct WithPersister(Arc<Persister>);
#[derive(Debug)]
pub struct NoTimeProvider;
#[derive(Debug)]
@@ -96,7 +97,7 @@ impl<W, P, T, L> ServerBuilder<W, NoQueryExec, P, T, L> {
}
impl<W, Q, T, L> ServerBuilder<W, Q, NoPersister, T, L> {
- pub fn persister<P>(self, p: Arc<P>) -> ServerBuilder<W, Q, WithPersister<P>, T, L> {
+ pub fn persister(self, p: Arc<Persister>) -> ServerBuilder<W, Q, WithPersister, T, L> {
ServerBuilder {
common_state: self.common_state,
time_provider: self.time_provider,
@@ -140,16 +141,16 @@ impl<W, Q, P, T> ServerBuilder<W, Q, P, T, NoListener> {
}
}
-impl<W, Q, P, T>
+impl<W, Q, T>
ServerBuilder<
WithWriteBuf<W>,
WithQueryExec<Q>,
- WithPersister<P>,
+ WithPersister,
WithTimeProvider<T>,
WithListener,
>
{
- pub fn build(self) -> Server<W, Q, P, T> {
+ pub fn build(self) -> Server<W, Q, T> {
let persister = Arc::clone(&self.persister.0);
let authorizer = Arc::clone(&self.authorizer);
let http = Arc::new(HttpApi::new(
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs
index 025dbe5f87..19a9a94839 100644
--- a/influxdb3_server/src/lib.rs
+++ b/influxdb3_server/src/lib.rs
@@ -28,7 +28,8 @@ use datafusion::execution::SendableRecordBatchStream;
use hyper::server::conn::AddrIncoming;
use hyper::server::conn::Http;
use hyper::service::service_fn;
-use influxdb3_write::{Persister, WriteBuffer};
+use influxdb3_write::persister::Persister;
+use influxdb3_write::WriteBuffer;
use iox_query::QueryDatabase;
use iox_query_params::StatementParams;
use iox_time::TimeProvider;
@@ -115,10 +116,10 @@ impl CommonServerState {
#[allow(dead_code)]
#[derive(Debug)]
-pub struct Server<W, Q, P, T> {
+pub struct Server<W, Q, T> {
common_state: CommonServerState,
http: Arc<HttpApi<W, Q, T>>,
- persister: Arc<P>,
+ persister: Arc<Persister>,
authorizer: Arc<dyn Authorizer>,
listener: TcpListener,
}
@@ -151,21 +152,17 @@ pub enum QueryKind {
Sql,
InfluxQl,
}
-impl<W, Q, P, T> Server<W, Q, P, T> {
+impl<W, Q, T> Server<W, Q, T> {
pub fn authorizer(&self) -> Arc<dyn Authorizer> {
Arc::clone(&self.authorizer)
}
}
-pub async fn serve<W, Q, P, T>(
- server: Server<W, Q, P, T>,
- shutdown: CancellationToken,
-) -> Result<()>
+pub async fn serve<W, Q, T>(server: Server<W, Q, T>, shutdown: CancellationToken) -> Result<()>
where
W: WriteBuffer,
Q: QueryExecutor,
http::Error: From<<Q as QueryExecutor>::Error>,
- P: Persister,
T: TimeProvider,
{
let req_metrics = RequestMetrics::new(
@@ -233,7 +230,7 @@ mod tests {
use datafusion::parquet::data_type::AsBytes;
use hyper::{body, Body, Client, Request, Response, StatusCode};
use influxdb3_wal::WalConfig;
- use influxdb3_write::persister::PersisterImpl;
+ use influxdb3_write::persister::Persister;
use influxdb3_write::write_buffer::WriteBufferImpl;
use influxdb3_write::LastCacheManager;
use iox_query::exec::{DedicatedExecutor, Executor, ExecutorConfig};
@@ -768,7 +765,7 @@ mod tests {
},
DedicatedExecutor::new_testing(),
));
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store), "test_host"));
+ let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host"));
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(start_time)));
let write_buffer = Arc::new(
diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs
index a3b6d15b2a..29a899ff79 100644
--- a/influxdb3_server/src/query_executor.rs
+++ b/influxdb3_server/src/query_executor.rs
@@ -582,7 +582,7 @@ mod tests {
use datafusion::{assert_batches_sorted_eq, error::DataFusionError};
use futures::TryStreamExt;
use influxdb3_wal::{Level0Duration, WalConfig};
- use influxdb3_write::{persister::PersisterImpl, write_buffer::WriteBufferImpl, Bufferer};
+ use influxdb3_write::{persister::Persister, write_buffer::WriteBufferImpl, Bufferer};
use iox_query::exec::{DedicatedExecutor, Executor, ExecutorConfig};
use iox_time::{MockProvider, Time};
use metric::Registry;
@@ -621,7 +621,7 @@ mod tests {
// Set up QueryExecutor
let object_store: Arc<dyn ObjectStore> =
Arc::new(LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap());
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store), "test_host"));
+ let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host"));
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let executor = make_exec(object_store);
let write_buffer = Arc::new(
diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs
index 776809c4de..2bd4d395ad 100644
--- a/influxdb3_write/src/last_cache/mod.rs
+++ b/influxdb3_write/src/last_cache/mod.rs
@@ -1568,7 +1568,7 @@ mod tests {
use crate::{
last_cache::{KeyValue, LastCacheProvider, Predicate, DEFAULT_CACHE_TTL},
- persister::PersisterImpl,
+ persister::Persister,
write_buffer::WriteBufferImpl,
Bufferer, LastCacheManager, Precision,
};
@@ -1582,7 +1582,7 @@ mod tests {
async fn setup_write_buffer() -> WriteBufferImpl<MockProvider> {
let obj_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new());
- let persister = Arc::new(PersisterImpl::new(obj_store, "test_host"));
+ let persister = Arc::new(Persister::new(obj_store, "test_host"));
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
WriteBufferImpl::new(
persister,
diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs
index 7560de00ec..4fa68f81e4 100644
--- a/influxdb3_write/src/lib.rs
+++ b/influxdb3_write/src/lib.rs
@@ -11,23 +11,17 @@ pub mod paths;
pub mod persister;
pub mod write_buffer;
-use crate::paths::ParquetFilePath;
use async_trait::async_trait;
-use bytes::Bytes;
use data_types::{NamespaceName, TimestampMinMax};
-use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::error::DataFusionError;
use datafusion::execution::context::SessionState;
-use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::prelude::Expr;
use influxdb3_catalog::catalog::{self, SequenceNumber};
use influxdb3_wal::{LastCacheDefinition, SnapshotSequenceNumber, WalFileSequenceNumber};
use iox_query::QueryChunk;
use iox_time::Time;
use last_cache::LastCacheProvider;
-use parquet::format::FileMetaData;
use serde::{Deserialize, Serialize};
-use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::atomic::AtomicU64;
@@ -132,58 +126,6 @@ pub trait LastCacheManager: Debug + Send + Sync + 'static {
) -> Result<(), write_buffer::Error>;
}
-pub const DEFAULT_OBJECT_STORE_URL: &str = "iox://influxdb3/";
-
-#[async_trait]
-pub trait Persister: Debug + Send + Sync + 'static {
- /// Loads the most recently persisted catalog from object storage.
- async fn load_catalog(&self) -> persister::Result<Option<PersistedCatalog>>;
-
- /// Loads the most recently persisted N snapshot parquet file lists from object storage.
- async fn load_snapshots(
- &self,
- most_recent_n: usize,
- ) -> persister::Result<Vec<PersistedSnapshot>>;
-
- // Loads a Parquet file from ObjectStore
- async fn load_parquet_file(&self, path: ParquetFilePath) -> persister::Result<Bytes>;
-
- /// Persists the catalog with the given `WalFileSequenceNumber`. If this is the highest ID, it will
- /// be the catalog that is returned the next time `load_catalog` is called.
- async fn persist_catalog(
- &self,
- wal_file_sequence_number: WalFileSequenceNumber,
- catalog: catalog::Catalog,
- ) -> persister::Result<()>;
-
- /// Persists the snapshot file
- async fn persist_snapshot(
- &self,
- persisted_snapshot: &PersistedSnapshot,
- ) -> persister::Result<()>;
-
- // Writes a SendableRecorgBatchStream to the Parquet format and persists it
- // to Object Store at the given path. Returns the number of bytes written and the file metadata.
- async fn persist_parquet_file(
- &self,
- path: ParquetFilePath,
- record_batch: SendableRecordBatchStream,
- ) -> persister::Result<(u64, FileMetaData)>;
-
- /// Returns the configured `ObjectStore` that data is loaded from and persisted to.
- fn object_store(&self) -> Arc<dyn object_store::ObjectStore>;
-
- // This is used by the query engine to know where to read parquet files from. This assumes
- // that there is a `ParquetStorage` with an id of `influxdb3` and that this url has been
- // registered with the query execution context. Kind of ugly here, but not sure where else
- // to keep this.
- fn object_store_url(&self) -> ObjectStoreUrl {
- ObjectStoreUrl::parse(DEFAULT_OBJECT_STORE_URL).unwrap()
- }
-
- fn as_any(&self) -> &dyn Any;
-}
-
/// A single write request can have many lines in it. A writer can request to accept all lines that are valid, while
/// returning an error for any invalid lines. This is the error information for a single invalid line.
#[derive(Debug, Serialize)]
diff --git a/influxdb3_write/src/persister.rs b/influxdb3_write/src/persister.rs
index d47e798b8b..e135bc2a3a 100644
--- a/influxdb3_write/src/persister.rs
+++ b/influxdb3_write/src/persister.rs
@@ -6,16 +6,15 @@ use crate::paths::ParquetFilePath;
use crate::paths::SnapshotInfoFilePath;
use crate::PersistedCatalog;
use crate::PersistedSnapshot;
-use crate::Persister;
use arrow::datatypes::SchemaRef;
use arrow::record_batch::RecordBatch;
-use async_trait::async_trait;
use bytes::Bytes;
use datafusion::common::DataFusionError;
use datafusion::execution::memory_pool::MemoryConsumer;
use datafusion::execution::memory_pool::MemoryPool;
use datafusion::execution::memory_pool::MemoryReservation;
use datafusion::execution::memory_pool::UnboundedMemoryPool;
+use datafusion::execution::object_store::ObjectStoreUrl;
use datafusion::physical_plan::SendableRecordBatchStream;
use futures_util::pin_mut;
use futures_util::stream::StreamExt;
@@ -68,25 +67,40 @@ impl From<Error> for DataFusionError {
pub type Result<T, E = Error> = std::result::Result<T, E>;
+const DEFAULT_OBJECT_STORE_URL: &str = "iox://influxdb3/";
+
+/// The persister is the primary interface with object storage where InfluxDB stores all Parquet
+/// data, catalog information, as well as WAL and snapshot data.
#[derive(Debug)]
-pub struct PersisterImpl {
+pub struct Persister {
+ /// This is used by the query engine to know where to read parquet files from. This assumes
+ /// that there is a `ParquetStorage` with an id of `influxdb3` and that this url has been
+ /// registered with the query execution context.
+ object_store_url: ObjectStoreUrl,
+ /// The interface to the object store being used
object_store: Arc<dyn ObjectStore>,
+ /// Prefix used for all paths in the object store for this persister
host_identifier_prefix: String,
pub(crate) mem_pool: Arc<dyn MemoryPool>,
}
-impl PersisterImpl {
+impl Persister {
pub fn new(
object_store: Arc<dyn ObjectStore>,
host_identifier_prefix: impl Into<String>,
) -> Self {
Self {
+ object_store_url: ObjectStoreUrl::parse(DEFAULT_OBJECT_STORE_URL).unwrap(),
object_store,
host_identifier_prefix: host_identifier_prefix.into(),
mem_pool: Arc::new(UnboundedMemoryPool::default()),
}
}
+ pub fn object_store_url(&self) -> &ObjectStoreUrl {
+ &self.object_store_url
+ }
+
async fn serialize_to_parquet(
&self,
batches: SendableRecordBatchStream,
@@ -97,42 +111,9 @@ impl PersisterImpl {
pub fn host_identifier_prefix(&self) -> &str {
&self.host_identifier_prefix
}
-}
-
-pub async fn serialize_to_parquet(
- mem_pool: Arc<dyn MemoryPool>,
- batches: SendableRecordBatchStream,
-) -> Result<ParquetBytes> {
- // The ArrowWriter::write() call will return an error if any subsequent
- // batch does not match this schema, enforcing schema uniformity.
- let schema = batches.schema();
-
- let stream = batches;
- let mut bytes = Vec::new();
- pin_mut!(stream);
-
- // Construct the arrow serializer with the metadata as part of the parquet
- // file properties.
- let mut writer = TrackedMemoryArrowWriter::try_new(&mut bytes, Arc::clone(&schema), mem_pool)?;
-
- while let Some(batch) = stream.try_next().await? {
- writer.write(batch)?;
- }
-
- let writer_meta = writer.close()?;
- if writer_meta.num_rows == 0 {
- return Err(Error::NoRows);
- }
-
- Ok(ParquetBytes {
- meta_data: writer_meta,
- bytes: Bytes::from(bytes),
- })
-}
-#[async_trait]
-impl Persister for PersisterImpl {
- async fn load_catalog(&self) -> Result<Option<PersistedCatalog>> {
+ /// Loads the most recently persisted catalog from object storage.
+ pub async fn load_catalog(&self) -> Result<Option<PersistedCatalog>> {
let mut list = self
.object_store
.list(Some(&CatalogFilePath::dir(&self.host_identifier_prefix)));
@@ -187,7 +168,8 @@ impl Persister for PersisterImpl {
}
}
- async fn load_snapshots(&self, mut most_recent_n: usize) -> Result<Vec<PersistedSnapshot>> {
+ /// Loads the most recently persisted N snapshot parquet file lists from object storage.
+ pub async fn load_snapshots(&self, mut most_recent_n: usize) -> Result<Vec<PersistedSnapshot>> {
let mut output = Vec::new();
let mut offset: Option<ObjPath> = None;
while most_recent_n > 0 {
@@ -247,11 +229,14 @@ impl Persister for PersisterImpl {
Ok(output)
}
- async fn load_parquet_file(&self, path: ParquetFilePath) -> Result<Bytes> {
+ /// Loads a Parquet file from ObjectStore
+ pub async fn load_parquet_file(&self, path: ParquetFilePath) -> Result<Bytes> {
Ok(self.object_store.get(&path).await?.bytes().await?)
}
- async fn persist_catalog(
+ /// Persists the catalog with the given `WalFileSequenceNumber`. If this is the highest ID, it will
+ /// be the catalog that is returned the next time `load_catalog` is called.
+ pub async fn persist_catalog(
&self,
wal_file_sequence_number: WalFileSequenceNumber,
catalog: Catalog,
@@ -267,7 +252,8 @@ impl Persister for PersisterImpl {
Ok(())
}
- async fn persist_snapshot(&self, persisted_snapshot: &PersistedSnapshot) -> Result<()> {
+ /// Persists the snapshot file
+ pub async fn persist_snapshot(&self, persisted_snapshot: &PersistedSnapshot) -> Result<()> {
let snapshot_file_path = SnapshotInfoFilePath::new(
self.host_identifier_prefix.as_str(),
persisted_snapshot.snapshot_sequence_number,
@@ -279,7 +265,9 @@ impl Persister for PersisterImpl {
Ok(())
}
- async fn persist_parquet_file(
+ /// Writes a [`SendableRecordBatchStream`] to the Parquet format and persists it to Object Store
+ /// at the given path. Returns the number of bytes written and the file metadata.
+ pub async fn persist_parquet_file(
&self,
path: ParquetFilePath,
record_batch: SendableRecordBatchStream,
@@ -293,15 +281,47 @@ impl Persister for PersisterImpl {
Ok((bytes_written, parquet.meta_data))
}
- fn object_store(&self) -> Arc<dyn ObjectStore> {
+ /// Returns the configured `ObjectStore` that data is loaded from and persisted to.
+ pub fn object_store(&self) -> Arc<dyn ObjectStore> {
self.object_store.clone()
}
- fn as_any(&self) -> &dyn Any {
+ pub fn as_any(&self) -> &dyn Any {
self as &dyn Any
}
}
+pub async fn serialize_to_parquet(
+ mem_pool: Arc<dyn MemoryPool>,
+ batches: SendableRecordBatchStream,
+) -> Result<ParquetBytes> {
+ // The ArrowWriter::write() call will return an error if any subsequent
+ // batch does not match this schema, enforcing schema uniformity.
+ let schema = batches.schema();
+
+ let stream = batches;
+ let mut bytes = Vec::new();
+ pin_mut!(stream);
+
+ // Construct the arrow serializer with the metadata as part of the parquet
+ // file properties.
+ let mut writer = TrackedMemoryArrowWriter::try_new(&mut bytes, Arc::clone(&schema), mem_pool)?;
+
+ while let Some(batch) = stream.try_next().await? {
+ writer.write(batch)?;
+ }
+
+ let writer_meta = writer.close()?;
+ if writer_meta.num_rows == 0 {
+ return Err(Error::NoRows);
+ }
+
+ Ok(ParquetBytes {
+ meta_data: writer_meta,
+ bytes: Bytes::from(bytes),
+ })
+}
+
pub struct ParquetBytes {
pub bytes: Bytes,
pub meta_data: FileMetaData,
@@ -377,7 +397,7 @@ mod tests {
async fn persist_catalog() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let catalog = Catalog::new();
let _ = catalog.db_or_create("my_db");
@@ -391,7 +411,7 @@ mod tests {
async fn persist_and_load_newest_catalog() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let catalog = Catalog::new();
let _ = catalog.db_or_create("my_db");
@@ -426,7 +446,7 @@ mod tests {
async fn persist_snapshot_info_file() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let info_file = PersistedSnapshot {
next_file_id: ParquetFileId::from(0),
snapshot_sequence_number: SnapshotSequenceNumber::new(0),
@@ -446,7 +466,7 @@ mod tests {
async fn persist_and_load_snapshot_info_files() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let info_file = PersistedSnapshot {
next_file_id: ParquetFileId::from(0),
snapshot_sequence_number: SnapshotSequenceNumber::new(0),
@@ -500,7 +520,7 @@ mod tests {
async fn persist_and_load_snapshot_info_files_with_fewer_than_requested() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let info_file = PersistedSnapshot {
next_file_id: ParquetFileId::from(0),
snapshot_sequence_number: SnapshotSequenceNumber::new(0),
@@ -524,7 +544,7 @@ mod tests {
async fn persist_and_load_over_9000_snapshot_info_files() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
for id in 0..9001 {
let info_file = PersistedSnapshot {
next_file_id: ParquetFileId::from(id),
@@ -554,7 +574,7 @@ mod tests {
async fn persist_add_parquet_file_and_load_snapshot() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let mut info_file = PersistedSnapshot::new(
SnapshotSequenceNumber::new(0),
WalFileSequenceNumber::new(0),
@@ -589,7 +609,7 @@ mod tests {
#[tokio::test]
async fn load_snapshot_works_with_no_exising_snapshots() {
let store = InMemory::new();
- let persister = PersisterImpl::new(Arc::new(store), "test_host");
+ let persister = Persister::new(Arc::new(store), "test_host");
let snapshots = persister.load_snapshots(100).await.unwrap();
assert!(snapshots.is_empty());
@@ -599,7 +619,7 @@ mod tests {
async fn get_parquet_bytes() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
let stream_builder = RecordBatchReceiverStreamBuilder::new(schema.clone(), 5);
@@ -626,7 +646,7 @@ mod tests {
async fn persist_and_load_parquet_bytes() {
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
- let persister = PersisterImpl::new(Arc::new(local_disk), "test_host");
+ let persister = Persister::new(Arc::new(local_disk), "test_host");
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
let stream_builder = RecordBatchReceiverStreamBuilder::new(schema.clone(), 5);
diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs
index 12b4f65a3c..78b206454e 100644
--- a/influxdb3_write/src/write_buffer/mod.rs
+++ b/influxdb3_write/src/write_buffer/mod.rs
@@ -8,13 +8,13 @@ pub(crate) mod validator;
use crate::cache::ParquetCache;
use crate::chunk::ParquetChunk;
use crate::last_cache::{self, CreateCacheArguments, LastCacheProvider};
-use crate::persister::PersisterImpl;
+use crate::persister::Persister;
use crate::write_buffer::persisted_files::PersistedFiles;
use crate::write_buffer::queryable_buffer::QueryableBuffer;
use crate::write_buffer::validator::WriteValidator;
use crate::{
- BufferedWriteRequest, Bufferer, ChunkContainer, LastCacheManager, ParquetFile, Persister,
- Precision, WriteBuffer, WriteLineError, NEXT_FILE_ID,
+ BufferedWriteRequest, Bufferer, ChunkContainer, LastCacheManager, ParquetFile, Precision,
+ WriteBuffer, WriteLineError, NEXT_FILE_ID,
};
use async_trait::async_trait;
use data_types::{ChunkId, ChunkOrder, ColumnType, NamespaceName, NamespaceNameError};
@@ -101,7 +101,7 @@ pub struct WriteRequest<'a> {
#[derive(Debug)]
pub struct WriteBufferImpl<T> {
catalog: Arc<Catalog>,
- persister: Arc<PersisterImpl>,
+ persister: Arc<Persister>,
parquet_cache: Arc<ParquetCache>,
persisted_files: Arc<PersistedFiles>,
buffer: Arc<QueryableBuffer>,
@@ -116,7 +116,7 @@ const N_SNAPSHOTS_TO_LOAD_ON_START: usize = 1_000;
impl<T: TimeProvider> WriteBufferImpl<T> {
pub async fn new(
- persister: Arc<PersisterImpl>,
+ persister: Arc<Persister>,
time_provider: Arc<T>,
executor: Arc<iox_query::exec::Executor>,
wal_config: WalConfig,
@@ -317,7 +317,7 @@ impl<T: TimeProvider> WriteBufferImpl<T> {
let parquet_chunk = parquet_chunk_from_file(
&parquet_file,
&table_schema,
- self.persister.object_store_url(),
+ self.persister.object_store_url().clone(),
self.persister.object_store(),
chunk_order,
);
@@ -350,7 +350,7 @@ impl<T: TimeProvider> WriteBufferImpl<T> {
let location = ObjPath::from(parquet_file.path.clone());
let parquet_exec = ParquetExecInput {
- object_store_url: self.persister.object_store_url(),
+ object_store_url: self.persister.object_store_url().clone(),
object_meta: ObjectMeta {
location,
last_modified: Default::default(),
@@ -599,7 +599,7 @@ impl<T: TimeProvider> WriteBuffer for WriteBufferImpl<T> {}
mod tests {
use super::*;
use crate::paths::{CatalogFilePath, SnapshotInfoFilePath};
- use crate::persister::PersisterImpl;
+ use crate::persister::Persister;
use crate::PersistedSnapshot;
use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_eq;
@@ -640,7 +640,7 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn writes_data_to_wal_and_is_queryable() {
let object_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new());
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store), "test_host"));
+ let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host"));
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let write_buffer = WriteBufferImpl::new(
Arc::clone(&persister),
@@ -1309,7 +1309,7 @@ mod tests {
}
}
- async fn verify_snapshot_count(n: usize, persister: &Arc<PersisterImpl>) {
+ async fn verify_snapshot_count(n: usize, persister: &Arc<Persister>) {
let mut checks = 0;
loop {
let persisted_snapshots = persister.load_snapshots(1000).await.unwrap();
@@ -1342,7 +1342,7 @@ mod tests {
object_store: Arc<dyn ObjectStore>,
wal_config: WalConfig,
) -> (WriteBufferImpl<MockProvider>, IOxSessionContext) {
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store), "test_host"));
+ let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host"));
let time_provider = Arc::new(MockProvider::new(start));
let wbuf = WriteBufferImpl::new(
Arc::clone(&persister),
diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs
index b553ca1f08..7bab3a15e4 100644
--- a/influxdb3_write/src/write_buffer/queryable_buffer.rs
+++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs
@@ -1,10 +1,10 @@
use crate::chunk::BufferChunk;
use crate::last_cache::LastCacheProvider;
use crate::paths::ParquetFilePath;
-use crate::persister::PersisterImpl;
+use crate::persister::Persister;
use crate::write_buffer::persisted_files::PersistedFiles;
use crate::write_buffer::table_buffer::TableBuffer;
-use crate::{ParquetFile, ParquetFileId, PersistedSnapshot, Persister};
+use crate::{ParquetFile, ParquetFileId, PersistedSnapshot};
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use data_types::{
@@ -37,7 +37,7 @@ pub(crate) struct QueryableBuffer {
pub(crate) executor: Arc<Executor>,
catalog: Arc<Catalog>,
last_cache_provider: Arc<LastCacheProvider>,
- persister: Arc<PersisterImpl>,
+ persister: Arc<Persister>,
persisted_files: Arc<PersistedFiles>,
buffer: Arc<RwLock<BufferState>>,
}
@@ -46,7 +46,7 @@ impl QueryableBuffer {
pub(crate) fn new(
executor: Arc<Executor>,
catalog: Arc<Catalog>,
- persister: Arc<PersisterImpl>,
+ persister: Arc<Persister>,
last_cache_provider: Arc<LastCacheProvider>,
persisted_files: Arc<PersistedFiles>,
) -> Self {
@@ -418,14 +418,11 @@ struct PersistJob {
sort_key: SortKey,
}
-async fn sort_dedupe_persist<P>(
+async fn sort_dedupe_persist(
persist_job: PersistJob,
- persister: Arc<P>,
+ persister: Arc<Persister>,
executor: Arc<Executor>,
-) -> (u64, FileMetaData)
-where
- P: Persister,
-{
+) -> (u64, FileMetaData) {
// Dedupe and sort using the COMPACT query built into
// iox_query
let row_count = persist_job.batch.num_rows();
|
1817104236dd1ef3d7fad34d48b98260ecb2eb11
|
Andrew Lamb
|
2023-03-24 19:45:53
|
Update DataFusion again (#7329)
|
* chore: update datafusion
* chore: Run cargo hakari tasks
---------
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update DataFusion again (#7329)
* chore: update datafusion
* chore: Run cargo hakari tasks
---------
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index b0b4613fe6..0029a49cfc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1496,8 +1496,8 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1543,8 +1543,8 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"arrow",
"arrow-array 35.0.0",
@@ -1557,8 +1557,8 @@ dependencies = [
[[package]]
name = "datafusion-execution"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"dashmap",
"datafusion-common",
@@ -1574,8 +1574,8 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1585,8 +1585,8 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"arrow",
"async-trait",
@@ -1602,8 +1602,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1632,8 +1632,8 @@ dependencies = [
[[package]]
name = "datafusion-proto"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"arrow",
"chrono",
@@ -1648,8 +1648,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"arrow",
"datafusion-common",
@@ -1659,8 +1659,8 @@ dependencies = [
[[package]]
name = "datafusion-sql"
-version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=3e247958b1dc30b67b309f712be75f3c725d427e#3e247958b1dc30b67b309f712be75f3c725d427e"
+version = "21.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba"
dependencies = [
"arrow-schema 34.0.0",
"datafusion-common",
diff --git a/Cargo.toml b/Cargo.toml
index 77ca24f5cf..1af05f976c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,8 +121,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "34.0.0" }
arrow-flight = { version = "34.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="3e247958b1dc30b67b309f712be75f3c725d427e", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="3e247958b1dc30b67b309f712be75f3c725d427e" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="74c3955db48f7ef6458125100eed3999512a56ba", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="74c3955db48f7ef6458125100eed3999512a56ba" }
hashbrown = { version = "0.13.2" }
parquet = { version = "34.0.0" }
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index d59cb3646f..b740605c59 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -29,9 +29,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "3e247958b1dc30b67b309f712be75f3c725d427e" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "3e247958b1dc30b67b309f712be75f3c725d427e", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "3e247958b1dc30b67b309f712be75f3c725d427e", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74c3955db48f7ef6458125100eed3999512a56ba" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74c3955db48f7ef6458125100eed3999512a56ba", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74c3955db48f7ef6458125100eed3999512a56ba", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1" }
fixedbitset = { version = "0.4" }
|
48466bfa89491969f6d0571c834e3317fb6286a1
|
Dom Dwyer
|
2023-06-14 15:12:50
|
bytes/frames sent/received & peers
|
Emit metrics tracking the number of bytes sent / received, and number of
frames sent / received by the local node.
Track the number of discovered peers to record peer discovery rate and
current number of known peers per node.
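A condensed sketch of the counter layout the diff below introduces in gossip/src/metric.rs (the helper name here is illustrative): one metric per quantity, with a "direction" attribute separating sent from received.

use metric::{Registry, U64Counter};

// Hypothetical helper mirroring gossip::metric::new_metrics in the diff.
fn register_frame_counters(metrics: &Registry) -> (U64Counter, U64Counter) {
    let frames = metrics.register_metric::<U64Counter>(
        "gossip_frames",
        "number of frames sent/received by this node",
    );

    // One recorder per direction; the reactor increments these whenever a
    // frame is written to or read from the UDP socket.
    let sent = frames.recorder(&[("direction", "sent")]);
    let received = frames.recorder(&[("direction", "received")]);
    (sent, received)
}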
| null |
feat(metrics): bytes/frames sent/received & peers
Emit metrics tracking the number of bytes sent / received, and number of
frames sent / received by the local node.
Track the number of discovered peers to record peer discovery rate and
current number of known peers per node.
|
diff --git a/Cargo.lock b/Cargo.lock
index 6cc4a061ae..7a2b7b364a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2007,6 +2007,7 @@ dependencies = [
"async-trait",
"futures",
"hashbrown 0.14.0",
+ "metric",
"prost",
"prost-build",
"test_helpers",
diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml
index 8000dd32f6..36110ca420 100644
--- a/gossip/Cargo.toml
+++ b/gossip/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2021"
async-trait = "0.1.68"
futures = "0.3.28"
hashbrown.workspace = true
+metric = { version = "0.1.0", path = "../metric" }
prost = "0.11.9"
thiserror = "1.0.40"
tokio = { version = "1.28.2", features = ["net", "io-util", "time", "rt", "sync", "macros"] }
diff --git a/gossip/src/builder.rs b/gossip/src/builder.rs
index 503c8e42ad..35d7ebab83 100644
--- a/gossip/src/builder.rs
+++ b/gossip/src/builder.rs
@@ -1,3 +1,5 @@
+use std::sync::Arc;
+
use tokio::{
net::{ToSocketAddrs, UdpSocket},
sync::mpsc,
@@ -10,6 +12,7 @@ use crate::{handle::GossipHandle, reactor::Reactor, Dispatcher};
pub struct Builder<T> {
seed_addrs: Vec<String>,
dispatcher: T,
+ metric: Arc<metric::Registry>,
}
impl<T> Builder<T> {
@@ -18,10 +21,11 @@ impl<T> Builder<T> {
///
/// Each address in `seed_addrs` is re-resolved periodically and the first
/// resolved IP address is used for peer communication.
- pub fn new(seed_addrs: Vec<String>, dispatcher: T) -> Self {
+ pub fn new(seed_addrs: Vec<String>, dispatcher: T, metric: Arc<metric::Registry>) -> Self {
Self {
seed_addrs,
dispatcher,
+ metric,
}
}
}
@@ -41,7 +45,7 @@ where
let (tx, rx) = mpsc::channel(1000);
// Initialise the reactor
- let reactor = Reactor::new(self.seed_addrs, socket, self.dispatcher);
+ let reactor = Reactor::new(self.seed_addrs, socket, self.dispatcher, &self.metric);
let identity = reactor.identity().clone();
// Start the message reactor.
diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs
index c52e50e7e3..66ecad726f 100644
--- a/gossip/src/lib.rs
+++ b/gossip/src/lib.rs
@@ -38,6 +38,7 @@
mod builder;
mod dispatcher;
mod handle;
+mod metric;
mod peers;
mod proto;
mod reactor;
diff --git a/gossip/src/metric.rs b/gossip/src/metric.rs
new file mode 100644
index 0000000000..c7ae249458
--- /dev/null
+++ b/gossip/src/metric.rs
@@ -0,0 +1,60 @@
+//! Metric newtype wrappers for type safety.
+//!
+//! The metrics are easily confused (they're all counters) so have the compiler
+//! check the right ones are being used in the right places.
+
+use metric::U64Counter;
+
+#[derive(Debug, Clone)]
+pub(crate) struct SentFrames(metric::U64Counter);
+
+impl SentFrames {
+ pub(crate) fn inc(&self, v: usize) {
+ self.0.inc(v as u64)
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReceivedFrames(metric::U64Counter);
+
+impl ReceivedFrames {
+ pub(crate) fn inc(&self, v: usize) {
+ self.0.inc(v as u64)
+ }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct SentBytes(metric::U64Counter);
+
+impl SentBytes {
+ pub(crate) fn inc(&self, v: usize) {
+ self.0.inc(v as u64)
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReceivedBytes(metric::U64Counter);
+
+impl ReceivedBytes {
+ pub(crate) fn inc(&self, v: usize) {
+ self.0.inc(v as u64)
+ }
+}
+
+pub(crate) fn new_metrics(
+ metrics: &metric::Registry,
+) -> (SentFrames, ReceivedFrames, SentBytes, ReceivedBytes) {
+ let metric_frames = metrics.register_metric::<U64Counter>(
+ "gossip_frames",
+ "number of frames sent/received by this node",
+ );
+ let metric_bytes = metrics
+ .register_metric::<U64Counter>("gossip_bytes", "sum of bytes sent/received by this node");
+
+ (
+ SentFrames(metric_frames.recorder(&[("direction", "sent")])),
+ ReceivedFrames(metric_frames.recorder(&[("direction", "received")])),
+ SentBytes(metric_bytes.recorder(&[("direction", "sent")])),
+ ReceivedBytes(metric_bytes.recorder(&[("direction", "received")])),
+ )
+}
diff --git a/gossip/src/peers.rs b/gossip/src/peers.rs
index 6ab7f61607..f4531af87b 100644
--- a/gossip/src/peers.rs
+++ b/gossip/src/peers.rs
@@ -2,12 +2,16 @@ use std::{io, net::SocketAddr};
use futures::{stream::FuturesUnordered, StreamExt};
use hashbrown::{hash_map::RawEntryMut, HashMap};
+use metric::U64Counter;
use prost::bytes::Bytes;
use tokio::net::UdpSocket;
use tracing::{trace, warn};
use uuid::Uuid;
-use crate::MAX_FRAME_BYTES;
+use crate::{
+ metric::{SentBytes, SentFrames},
+ MAX_FRAME_BYTES,
+};
/// A unique generated identity containing 128 bits of randomness (V4 UUID).
#[derive(Debug, Eq, Clone)]
@@ -69,7 +73,13 @@ pub(crate) struct Peer {
}
impl Peer {
- pub(crate) async fn send(&self, buf: &[u8], socket: &UdpSocket) -> Result<usize, io::Error> {
+ pub(crate) async fn send(
+ &self,
+ buf: &[u8],
+ socket: &UdpSocket,
+ frames_sent: &SentFrames,
+ bytes_sent: &SentBytes,
+ ) -> Result<usize, io::Error> {
// If the frame is larger than the allowed maximum, then the receiver
// will truncate the frame when reading the socket.
//
@@ -88,6 +98,8 @@ impl Peer {
let ret = socket.send_to(buf, self.addr).await;
match &ret {
Ok(n_bytes) => {
+ frames_sent.inc(1);
+ bytes_sent.inc(*n_bytes);
trace!(identity=%self.identity, n_bytes, peer_addr=%self.addr, "send frame")
}
Err(e) => {
@@ -102,14 +114,25 @@ impl Peer {
#[derive(Debug, Default)]
pub(crate) struct PeerList {
list: HashMap<Identity, Peer>,
+
+ /// The number of known, believed-to-be-healthy peers.
+ metric_peer_count: metric::U64Counter,
}
impl PeerList {
/// Initialise the [`PeerList`] with capacity for `cap` number of [`Peer`]
/// instances.
- pub(crate) fn with_capacity(cap: usize) -> Self {
+ pub(crate) fn with_capacity(cap: usize, metrics: &metric::Registry) -> Self {
+ let metric_peer_count = metrics
+ .register_metric::<U64Counter>(
+ "gossip_known_peers",
+ "number of likely healthy peers known to this node",
+ )
+ .recorder(&[]);
+
Self {
list: HashMap::with_capacity(cap),
+ metric_peer_count,
}
}
@@ -123,6 +146,7 @@ impl PeerList {
pub(crate) fn upsert(&mut self, identity: &Identity, peer_addr: SocketAddr) -> &mut Peer {
let p = match self.list.raw_entry_mut().from_key(identity) {
RawEntryMut::Vacant(v) => {
+ self.metric_peer_count.inc(1);
v.insert(
identity.to_owned(),
Peer {
@@ -141,10 +165,16 @@ impl PeerList {
/// Broadcast `buf` to all known peers over `socket`, returning the number
/// of bytes sent in total.
- pub(crate) async fn broadcast(&self, buf: &[u8], socket: &UdpSocket) -> usize {
+ pub(crate) async fn broadcast(
+ &self,
+ buf: &[u8],
+ socket: &UdpSocket,
+ frames_sent: &SentFrames,
+ bytes_sent: &SentBytes,
+ ) -> usize {
self.list
.values()
- .map(|v| v.send(buf, socket))
+ .map(|v| v.send(buf, socket, frames_sent, bytes_sent))
.collect::<FuturesUnordered<_>>()
.fold(0, |acc, res| async move {
match res {
diff --git a/gossip/src/reactor.rs b/gossip/src/reactor.rs
index 9058398cfb..58c771b4ef 100644
--- a/gossip/src/reactor.rs
+++ b/gossip/src/reactor.rs
@@ -8,6 +8,7 @@ use tokio::{
use tracing::{debug, error, info, trace, warn};
use crate::{
+ metric::*,
peers::{Identity, PeerList},
proto::{self, frame_message::Payload, FrameMessage},
seed::{seed_ping_task, Seed},
@@ -83,14 +84,28 @@ pub(crate) struct Reactor<T> {
/// contain less peers than the number of initial seeds.
peer_list: PeerList,
+ /// The UDP socket used for communication with peers.
socket: Arc<UdpSocket>,
+
+ /// The count of frames sent and received.
+ metric_frames_sent: SentFrames,
+ metric_frames_received: ReceivedFrames,
+
+ /// The sum of bytes sent and received.
+ metric_bytes_sent: SentBytes,
+ metric_bytes_received: ReceivedBytes,
}
impl<T> Reactor<T>
where
T: Dispatcher,
{
- pub(crate) fn new(seed_list: Vec<String>, socket: UdpSocket, dispatch: T) -> Self {
+ pub(crate) fn new(
+ seed_list: Vec<String>,
+ socket: UdpSocket,
+ dispatch: T,
+ metrics: &metric::Registry,
+ ) -> Self {
// Generate a unique UUID for this Reactor instance, and cache the wire
// representation.
let identity = Identity::new();
@@ -117,6 +132,11 @@ where
serialisation_buf.clone()
};
+ // Initialise the various metrics with wrappers to help distinguish
+ // between the (very similar) counters.
+ let (metric_frames_sent, metric_frames_received, metric_bytes_sent, metric_bytes_received) =
+ new_metrics(metrics);
+
// Spawn a task that periodically pings all known seeds.
//
// Pinging all seeds announces this node as alive, propagating the
@@ -126,6 +146,8 @@ where
Arc::clone(&seed_list),
Arc::clone(&socket),
cached_ping_frame,
+ metric_frames_sent.clone(),
+ metric_bytes_sent.clone(),
)));
Self {
@@ -133,10 +155,14 @@ where
identity,
cached_frame,
serialisation_buf,
- peer_list: PeerList::with_capacity(seed_list.len()),
+ peer_list: PeerList::with_capacity(seed_list.len(), metrics),
seed_list,
_seed_ping_task: seed_ping_task,
socket,
+ metric_frames_sent,
+ metric_frames_received,
+ metric_bytes_sent,
+ metric_bytes_received,
}
}
@@ -148,10 +174,10 @@ where
);
loop {
- let (_bytes_read, _bytes_sent) = tokio::select! {
+ tokio::select! {
msg = self.read() => {
match msg {
- Ok((bytes_read, bytes_sent)) => (bytes_read, bytes_sent),
+ Ok(()) => {},
Err(Error::NoPayload { peer, addr }) => {
warn!(%peer, %addr, "message contains no payload");
continue;
@@ -182,7 +208,6 @@ where
}
Some(Request::GetPeers(tx)) => {
let _ = tx.send(self.peer_list.peer_uuids());
- (0, 0)
},
Some(Request::Broadcast(payload)) => {
// The user is guaranteed MAX_USER_PAYLOAD_BYTES to
@@ -196,8 +221,7 @@ where
{
continue
}
- let bytes_sent = self.peer_list.broadcast(&self.serialisation_buf, &self.socket).await;
- (0, bytes_sent)
+ self.peer_list.broadcast(&self.serialisation_buf, &self.socket, &self.metric_frames_sent, &self.metric_bytes_sent).await;
}
}
}
@@ -212,9 +236,11 @@ where
/// returns the result to the sender of the original frame.
///
/// Returns the bytes read and bytes sent during execution of this method.
- async fn read(&mut self) -> Result<(usize, usize), Error> {
+ async fn read(&mut self) -> Result<(), Error> {
// Read a frame into buf.
let (bytes_read, frame, peer_addr) = read_frame(&self.socket).await?;
+ self.metric_frames_received.inc(1);
+ self.metric_bytes_received.inc(bytes_read as _);
// Read the peer identity from the frame
let identity =
@@ -228,7 +254,7 @@ where
// this node will not be added to the active peer list.
if identity == self.identity {
debug!(%identity, %peer_addr, bytes_read, "dropping frame from self");
- return Ok((bytes_read, 0));
+ return Ok(());
}
// Find or create the peer in the peer list.
@@ -264,7 +290,7 @@ where
// Sometimes no message will be returned to the peer - there's no need
// to send an empty frame.
if out_messages.is_empty() {
- return Ok((bytes_read, 0));
+ return Ok(());
}
// Serialise the frame into the serialisation buffer.
@@ -274,9 +300,15 @@ where
&mut self.serialisation_buf,
)?;
- let bytes_sent = peer.send(&self.serialisation_buf, &self.socket).await?;
+ peer.send(
+ &self.serialisation_buf,
+ &self.socket,
+ &self.metric_frames_sent,
+ &self.metric_bytes_sent,
+ )
+ .await?;
- Ok((bytes_read, bytes_sent))
+ Ok(())
}
/// Return the randomised identity assigned to this instance.
@@ -351,10 +383,18 @@ fn new_payload(p: Payload) -> proto::FrameMessage {
}
/// Send a PING message to `socket`, using `peer_name` as logging context.
-pub(crate) async fn ping(ping_frame: &[u8], socket: &UdpSocket, addr: SocketAddr) -> usize {
+pub(crate) async fn ping(
+ ping_frame: &[u8],
+ socket: &UdpSocket,
+ addr: SocketAddr,
+ sent_frames: &SentFrames,
+ sent_bytes: &SentBytes,
+) -> usize {
match socket.send_to(ping_frame, &addr).await {
Ok(n_bytes) => {
debug!(addr = %addr, "ping");
+ sent_frames.inc(1);
+ sent_bytes.inc(n_bytes);
n_bytes
}
Err(e) => {
diff --git a/gossip/src/seed.rs b/gossip/src/seed.rs
index 843a8960ba..97df4f245c 100644
--- a/gossip/src/seed.rs
+++ b/gossip/src/seed.rs
@@ -7,7 +7,11 @@ use tokio::{
};
use tracing::{debug, warn};
-use crate::{reactor::ping, RESOLVE_TIMEOUT, SEED_PING_INTERVAL};
+use crate::{
+ metric::{SentBytes, SentFrames},
+ reactor::ping,
+ RESOLVE_TIMEOUT, SEED_PING_INTERVAL,
+};
/// The user-provided seed peer address.
///
@@ -63,6 +67,8 @@ pub(super) async fn seed_ping_task(
seeds: Arc<[Seed]>,
socket: Arc<UdpSocket>,
ping_frame: Vec<u8>,
+ sent_frames: SentFrames,
+ sent_bytes: SentBytes,
) {
let mut interval = tokio::time::interval(SEED_PING_INTERVAL);
@@ -77,7 +83,7 @@ pub(super) async fn seed_ping_task(
.iter()
.map(|seed| async {
if let Some(addr) = seed.resolve().await {
- ping(&ping_frame, &socket, addr).await
+ ping(&ping_frame, &socket, addr, &sent_frames, &sent_bytes).await
} else {
0
}
diff --git a/gossip/tests/smoke.rs b/gossip/tests/smoke.rs
index c46be5f343..16ff06c469 100644
--- a/gossip/tests/smoke.rs
+++ b/gossip/tests/smoke.rs
@@ -1,4 +1,4 @@
-use std::time::Duration;
+use std::{sync::Arc, time::Duration};
use test_helpers::{maybe_start_logging, timeout::FutureTimeout};
use tokio::{net::UdpSocket, sync::mpsc};
@@ -11,6 +11,8 @@ use gossip::*;
async fn test_payload_exchange() {
maybe_start_logging();
+ let metrics = Arc::new(metric::Registry::default());
+
// How long to wait for peer discovery to complete.
const TIMEOUT: Duration = Duration::from_secs(5);
@@ -32,8 +34,8 @@ async fn test_payload_exchange() {
// Initialise both reactors
let addrs = dbg!(vec![a_addr.to_string(), b_addr.to_string()]);
- let a = Builder::new(addrs.clone(), a_tx).build(a_socket);
- let b = Builder::new(addrs, b_tx).build(b_socket);
+ let a = Builder::new(addrs.clone(), a_tx, Arc::clone(&metrics)).build(a_socket);
+ let b = Builder::new(addrs, b_tx, Arc::clone(&metrics)).build(b_socket);
// Wait for peer discovery to occur
async {
|
e899dc70c03c5271f4523921a99f1d209592581f
|
Dom Dwyer
|
2023-03-23 15:46:19
|
meaningful RPC write failure errors
|
Whenever an RPC write to an upstream ingester fails, it is retried after
an increasing delay, until the RPC_TIMEOUT is hit. Because of this, any
RPC write error would be returned as a "timeout", masking the underlying
reason the write actually failed.
This commit pushes down the timeout logic, and retains the most recently
observed RPC write error, returning it to the user instead of the
timeout error.
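The shape of that change, as a simplified sketch (the error enum and function here are illustrative, not the router's actual types): the backoff loop runs inside the timeout, and the last observed error is preferred over the bare timeout.

use std::time::Duration;

#[derive(Debug)]
enum WriteError {
    Upstream(String),
    Timeout(tokio::time::error::Elapsed),
}

async fn write_with_deadline<F, Fut>(deadline: Duration, mut attempt: F) -> Result<(), WriteError>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), WriteError>>,
{
    // Most recent error returned by an upstream attempt, if any.
    let mut last_err = None;

    tokio::time::timeout(deadline, async {
        // Retry with exponential backoff until the outer timeout cancels us.
        let mut delay = Duration::from_millis(50);
        loop {
            match attempt().await {
                Ok(()) => return Ok(()),
                Err(e) => last_err = Some(e),
            }
            tokio::time::sleep(delay).await;
            delay = delay.saturating_mul(2);
        }
    })
    .await
    // On timeout, surface the most recent upstream error rather than the
    // opaque "elapsed" error, falling back to Timeout if nothing ever failed.
    .map_err(|elapsed| last_err.unwrap_or(WriteError::Timeout(elapsed)))?
}

In the diff below, write_loop follows this pattern with RpcWriteError, and additionally maps UpstreamNotConnected to the friendlier NoUpstreams variant.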
| null |
feat: meaningful RPC write failure errors
Whenever an RPC write to an upstream ingester fails, it is retried after
an increasing delay, until the RPC_TIMEOUT is hit. Because of this, any
RPC write error would be returned as a "timeout", masking the underlying
reason the write actually failed.
This commit pushes down the timeout logic, and retains the most recently
observed RPC write error, returning it to the user instead of the
timeout error.
|
diff --git a/router/Cargo.toml b/router/Cargo.toml
index 8f1f2c3632..f3cb1c2b33 100644
--- a/router/Cargo.toml
+++ b/router/Cargo.toml
@@ -40,7 +40,7 @@ service_grpc_object_store = { path = "../service_grpc_object_store" }
sharder = { path = "../sharder" }
snafu = "0.7"
thiserror = "1.0"
-tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
+tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }
tonic = "0.8"
trace = { path = "../trace/" }
workspace-hack = { version = "0.1", path = "../workspace-hack" }
@@ -61,6 +61,7 @@ pretty_assertions = "1.3.0"
rand = "0.8.3"
schema = { path = "../schema" }
test_helpers = { version = "0.1.0", path = "../test_helpers", features = ["future_timeout"] }
+tokio = { version = "1", features = ["test-util"] }
tokio-stream = { version = "0.1.12", default_features = false, features = [] }
[lib]
diff --git a/router/src/dml_handlers/rpc_write.rs b/router/src/dml_handlers/rpc_write.rs
index 380369bf7a..d1add64be8 100644
--- a/router/src/dml_handlers/rpc_write.rs
+++ b/router/src/dml_handlers/rpc_write.rs
@@ -41,7 +41,7 @@ pub enum RpcWriteError {
/// The RPC call timed out after [`RPC_TIMEOUT`] length of time.
#[error("timeout writing to upstream ingester")]
- Timeout(#[from] tokio::time::error::Elapsed),
+ Timeout(tokio::time::error::Elapsed),
/// There are no healthy ingesters to route a write to.
#[error("no healthy upstream ingesters available")]
@@ -135,16 +135,15 @@ where
};
// Perform the gRPC write to an ingester.
- tokio::time::timeout(
- RPC_TIMEOUT,
- write_loop(
- self.endpoints
- .endpoints()
- .ok_or(RpcWriteError::NoUpstreams)?,
- req,
- ),
+ //
+ // This call is bounded to at most RPC_TIMEOUT duration of time.
+ write_loop(
+ self.endpoints
+ .endpoints()
+ .ok_or(RpcWriteError::NoUpstreams)?,
+ req,
)
- .await??;
+ .await?;
debug!(
%partition_key,
@@ -177,6 +176,13 @@ where
}
}
+/// Perform an RPC write with `req` against one of the upstream ingesters in
+/// `endpoints`.
+///
+/// This write attempt is bounded in time to at most [`RPC_TIMEOUT`].
+///
+/// If at least one upstream request has failed (returning an error), the most
+/// recent error is returned.
async fn write_loop<T>(
mut endpoints: UpstreamSnapshot<'_, T>,
req: WriteRequest,
@@ -184,22 +190,52 @@ async fn write_loop<T>(
where
T: WriteClient,
{
- // Infinitely cycle through the snapshot, trying each node in turn until the
- // request succeeds or this async call times out.
- let mut delay = Duration::from_millis(50);
- loop {
- match endpoints
- .next()
- .ok_or(RpcWriteError::NoUpstreams)?
- .write(req.clone())
- .await
- {
- Ok(()) => return Ok(()),
- Err(e) => warn!(error=%e, "failed ingester rpc write"),
- };
- tokio::time::sleep(delay).await;
- delay = delay.saturating_mul(2);
- }
+ // The last error returned from an upstream write request attempt.
+ let mut last_err = None;
+
+ tokio::time::timeout(RPC_TIMEOUT, async {
+ // Infinitely cycle through the snapshot, trying each node in turn until the
+ // request succeeds or this async call times out.
+ let mut delay = Duration::from_millis(50);
+ loop {
+ match endpoints
+ .next()
+ .ok_or(RpcWriteError::NoUpstreams)?
+ .write(req.clone())
+ .await
+ {
+ Ok(()) => return Ok(()),
+ Err(e) => {
+ warn!(error=%e, "failed ingester rpc write");
+ last_err = Some(e);
+ }
+ };
+ tokio::time::sleep(delay).await;
+ delay = delay.saturating_mul(2);
+ }
+ })
+ .await
+ .map_err(|e| match last_err {
+ // This error is an internal implementation detail - the meaningful
+ // error for the user is "there's no healthy upstreams".
+ Some(RpcWriteError::UpstreamNotConnected(_)) => RpcWriteError::NoUpstreams,
+ // Any other error is returned as-is.
+ Some(v) => v,
+ // If the entire write attempt fails during the first RPC write
+ // request, then the per-request timeout is greater than the write
+ // attempt timeout, and therefore only one upstream is ever tried.
+ //
+ // Log a warning so the logs show the timeout, but also include
+ // helpful hint for the user to adjust the configuration.
+ None => {
+ warn!(
+ "failed ingester rpc write - rpc write request timed out during \
+ the first rpc attempt; consider decreasing rpc request timeout \
+ below {RPC_TIMEOUT:?}"
+ );
+ RpcWriteError::Timeout(e)
+ }
+ })?
}
#[cfg(test)]
@@ -422,24 +458,16 @@ mod tests {
assert_eq!(got_tables, want_tables);
}
- /// Assert the error response for a write request when there are no healthy
- /// upstreams.
- #[tokio::test]
- async fn test_write_no_healthy_upstreams() {
- let client_1 = Arc::new(MockWriteClient::default());
- let circuit_1 = Arc::new(MockCircuitBreaker::default());
-
- // Mark the client circuit breaker as unhealthy
- circuit_1.set_healthy(false);
-
- let handler =
- RpcWrite {
- endpoints: Balancer::new(
- [CircuitBreakingClient::new(client_1, "client_1")
- .with_circuit_breaker(circuit_1)],
- None,
- ),
- };
+ async fn make_request<T, C>(
+ endpoints: impl IntoIterator<Item = CircuitBreakingClient<T, C>> + Send,
+ ) -> Result<Vec<DmlMeta>, RpcWriteError>
+ where
+ T: WriteClient + 'static,
+ C: CircuitBreakerState + 'static,
+ {
+ let handler = RpcWrite {
+ endpoints: Balancer::new(endpoints, None),
+ };
// Generate some write input
let input = Partitioned::new(
@@ -447,15 +475,36 @@ mod tests {
lp_to_writes("bananas,tag1=A,tag2=B val=42i 1"),
);
+ // Use tokio's "auto-advance" time feature to avoid waiting for the
+ // actual timeout duration.
+ tokio::time::pause();
+
// Drive the RPC writer
- let got = handler
+ handler
.write(
&NamespaceName::new(NAMESPACE_NAME).unwrap(),
NAMESPACE_ID,
input,
None,
)
- .await;
+ .await
+ }
+
+ /// Assert the error response for a write request when there are no healthy
+ /// upstreams.
+ #[tokio::test]
+ async fn test_write_no_healthy_upstreams() {
+ let client_1 = Arc::new(MockWriteClient::default());
+ let circuit_1 = Arc::new(MockCircuitBreaker::default());
+
+ // Mark the client circuit breaker as unhealthy
+ circuit_1.set_healthy(false);
+
+ let got = make_request([
+ CircuitBreakingClient::new(client_1, "client_1").with_circuit_breaker(circuit_1)
+ ])
+ .await;
+
assert_matches!(got, Err(RpcWriteError::NoUpstreams));
}
@@ -463,33 +512,42 @@ mod tests {
/// error.
#[tokio::test]
async fn test_write_upstream_error() {
- let client_1 = Arc::new(MockWriteClient::default());
+ let client_1 = Arc::new(
+ MockWriteClient::default().with_ret(Box::new(iter::repeat_with(|| {
+ Err(RpcWriteError::Upstream(tonic::Status::internal("bananas")))
+ }))),
+ );
let circuit_1 = Arc::new(MockCircuitBreaker::default());
+ circuit_1.set_healthy(true);
- let handler =
- RpcWrite {
- endpoints: Balancer::new(
- [CircuitBreakingClient::new(client_1, "client_1")
- .with_circuit_breaker(circuit_1)],
- None,
- ),
- };
+ let got = make_request([
+ CircuitBreakingClient::new(client_1, "client_1").with_circuit_breaker(circuit_1)
+ ])
+ .await;
- // Generate some write input
- let input = Partitioned::new(
- PartitionKey::from("2022-01-01"),
- lp_to_writes("bananas,tag1=A,tag2=B val=42i 1"),
+ assert_matches!(got, Err(RpcWriteError::Upstream(s)) => {
+ assert_eq!(s.code(), tonic::Code::Internal);
+ assert_eq!(s.message(), "bananas");
+ });
+ }
+
+ /// Assert that an [`RpcWriteError::UpstreamNotConnected`] error is mapped
+ /// to a user-friendly [`RpcWriteError::NoUpstreams`] for consistency.
+ #[tokio::test]
+ async fn test_write_map_upstream_not_connected_error() {
+ let client_1 = Arc::new(
+ MockWriteClient::default().with_ret(Box::new(iter::repeat_with(|| {
+ Err(RpcWriteError::UpstreamNotConnected("bananas".to_string()))
+ }))),
);
+ let circuit_1 = Arc::new(MockCircuitBreaker::default());
+ circuit_1.set_healthy(true);
+
+ let got = make_request([
+ CircuitBreakingClient::new(client_1, "client_1").with_circuit_breaker(circuit_1)
+ ])
+ .await;
- // Drive the RPC writer
- let got = handler
- .write(
- &NamespaceName::new(NAMESPACE_NAME).unwrap(),
- NAMESPACE_ID,
- input,
- None,
- )
- .await;
assert_matches!(got, Err(RpcWriteError::NoUpstreams));
}
}
|
7306ea942489a1d5cdb4b505279fb2cfb6d2b615
|
Marco Neumann
|
2023-01-25 15:45:20
|
divide&conquer framework (#6697)
|
Allows compactor2 to run a fixed-point loop (until all work is done) and
in every loop it can run multiple jobs.
The jobs are currently organized by "branches". This is because our
upcoming OOM handling may split a branch further if it doesn't complete.
Also note that the current config resembles the state prior to this PR.
So the FP-loop will only iterate ONCE and then run out of L0 files. A
more advanced setup can be built using the framework though.
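As an illustration only (the function and the plain integer file IDs here are made up, not the compactor2 API), the fixed-point shape looks roughly like this, with the divide step currently producing a single branch:

// Hypothetical sketch of the fixed-point round/branch structure.
fn compact_partition<F>(mut files: Vec<u64>, mut compact_branch: F)
where
    // Compacts one branch of file IDs, returning files that still need work.
    F: FnMut(Vec<u64>) -> Vec<u64>,
{
    // Fixed-point loop: run rounds until a round leaves nothing to compact.
    while !files.is_empty() {
        // Divide step: the hardcoded SingleBranchDivideInitial puts all files
        // into one branch; an OOM-aware divider could split it further.
        let branches = vec![files];

        // Run one job per branch and gather leftovers for the next round.
        files = branches
            .into_iter()
            .flat_map(&mut compact_branch)
            .collect();
    }
}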
| null |
feat: divide&conquer framework (#6697)
Allows compactor2 to run a fixed-point loop (until all work is done) and
in every loop it can run multiple jobs.
The jobs are currently organized by "branches". This is because our
upcoming OOM handling may split a branch further if it doesn't complete.
Also note that the current config resembles the state prior to this PR.
So the FP-loop will only iterate ONCE and then run out of L0 files. A
more advanced setup can be built using the framework though.
|
diff --git a/compactor2/src/components/divide_initial/mod.rs b/compactor2/src/components/divide_initial/mod.rs
new file mode 100644
index 0000000000..c0c0be7018
--- /dev/null
+++ b/compactor2/src/components/divide_initial/mod.rs
@@ -0,0 +1,9 @@
+use std::fmt::{Debug, Display};
+
+use data_types::ParquetFile;
+
+pub mod single_branch;
+
+pub trait DivideInitial: Debug + Display + Send + Sync {
+ fn divide(&self, files: Vec<ParquetFile>) -> Vec<Vec<ParquetFile>>;
+}
diff --git a/compactor2/src/components/divide_initial/single_branch.rs b/compactor2/src/components/divide_initial/single_branch.rs
new file mode 100644
index 0000000000..8de9527fdc
--- /dev/null
+++ b/compactor2/src/components/divide_initial/single_branch.rs
@@ -0,0 +1,61 @@
+use std::fmt::Display;
+
+use data_types::ParquetFile;
+
+use super::DivideInitial;
+
+#[derive(Debug, Default)]
+pub struct SingleBranchDivideInitial;
+
+impl SingleBranchDivideInitial {
+ pub fn new() -> Self {
+ Self::default()
+ }
+}
+
+impl Display for SingleBranchDivideInitial {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "single_branch")
+ }
+}
+
+impl DivideInitial for SingleBranchDivideInitial {
+ fn divide(&self, files: Vec<ParquetFile>) -> Vec<Vec<ParquetFile>> {
+ if files.is_empty() {
+ vec![]
+ } else {
+ vec![files]
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::test_util::ParquetFileBuilder;
+
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ assert_eq!(
+ SingleBranchDivideInitial::new().to_string(),
+ "single_branch"
+ );
+ }
+
+ #[test]
+ fn test_divide() {
+ let divide = SingleBranchDivideInitial::new();
+
+ // empty input
+ assert_eq!(divide.divide(vec![]), Vec::<Vec<_>>::new());
+
+ // not empty
+ let f1 = ParquetFileBuilder::new(1).build();
+ let f2 = ParquetFileBuilder::new(2).build();
+ assert_eq!(
+ divide.divide(vec![f1.clone(), f2.clone()]),
+ vec![vec![f1, f2]]
+ );
+ }
+}
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index c906cc2059..54efbc8fbc 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -21,6 +21,7 @@ use super::{
},
df_plan_exec::dedicated::DedicatedDataFusionPlanExec,
df_planner::planner_v1::V1DataFusionPlanner,
+ divide_initial::single_branch::SingleBranchDivideInitial,
file_filter::{and::AndFileFilter, level_range::LevelRangeFileFilter},
files_filter::{chain::FilesFilterChain, per_file::PerFileFilesFilter},
parquet_file_sink::{
@@ -34,14 +35,15 @@ use super::{
partition_files_source::catalog::CatalogPartitionFilesSource,
partition_filter::{
and::AndPartitionFilter, has_files::HasFilesPartitionFilter,
- logging::LoggingPartitionFilterWrapper, metrics::MetricsPartitionFilterWrapper,
- never_skipped::NeverSkippedPartitionFilter,
+ has_matching_file::HasMatchingFilePartitionFilter, logging::LoggingPartitionFilterWrapper,
+ metrics::MetricsPartitionFilterWrapper, never_skipped::NeverSkippedPartitionFilter,
},
partitions_source::{
catalog::CatalogPartitionsSource, logging::LoggingPartitionsSourceWrapper,
metrics::MetricsPartitionsSourceWrapper,
randomize_order::RandomizeOrderPartitionsSourcesWrapper,
},
+ round_split::all_now::AllNowRoundSplit,
skipped_compactions_source::catalog::CatalogSkippedCompactionsSource,
Components,
};
@@ -86,6 +88,11 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
Arc::clone(&config.catalog),
),
)),
+ Arc::new(HasMatchingFilePartitionFilter::new(
+ LevelRangeFileFilter::new(
+ CompactionLevel::Initial..=CompactionLevel::Initial,
+ ),
+ )),
Arc::new(HasFilesPartitionFilter::new()),
]),
&config.metric_registry,
@@ -133,5 +140,7 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
Arc::clone(&config.exec),
),
)),
+ round_split: Arc::new(AllNowRoundSplit::new()),
+ divide_initial: Arc::new(SingleBranchDivideInitial::new()),
})
}
diff --git a/compactor2/src/components/mod.rs b/compactor2/src/components/mod.rs
index 4c6c615801..61b45c2d30 100644
--- a/compactor2/src/components/mod.rs
+++ b/compactor2/src/components/mod.rs
@@ -2,15 +2,16 @@ use std::sync::Arc;
use self::{
commit::Commit, df_plan_exec::DataFusionPlanExec, df_planner::DataFusionPlanner,
- files_filter::FilesFilter, namespaces_source::NamespacesSource,
+ divide_initial::DivideInitial, files_filter::FilesFilter, namespaces_source::NamespacesSource,
parquet_file_sink::ParquetFileSink, partition_done_sink::PartitionDoneSink,
partition_files_source::PartitionFilesSource, partition_filter::PartitionFilter,
- partitions_source::PartitionsSource, tables_source::TablesSource,
+ partitions_source::PartitionsSource, round_split::RoundSplit, tables_source::TablesSource,
};
pub mod commit;
pub mod df_plan_exec;
pub mod df_planner;
+pub mod divide_initial;
pub mod file_filter;
pub mod files_filter;
pub mod hardcoded;
@@ -21,6 +22,7 @@ pub mod partition_files_source;
pub mod partition_filter;
pub mod partitions_source;
pub mod report;
+pub mod round_split;
pub mod skipped_compactions_source;
pub mod tables_source;
@@ -37,4 +39,6 @@ pub struct Components {
pub df_planner: Arc<dyn DataFusionPlanner>,
pub df_plan_exec: Arc<dyn DataFusionPlanExec>,
pub parquet_file_sink: Arc<dyn ParquetFileSink>,
+ pub round_split: Arc<dyn RoundSplit>,
+ pub divide_initial: Arc<dyn DivideInitial>,
}
diff --git a/compactor2/src/components/partition_filter/has_matching_file.rs b/compactor2/src/components/partition_filter/has_matching_file.rs
new file mode 100644
index 0000000000..684a5c6235
--- /dev/null
+++ b/compactor2/src/components/partition_filter/has_matching_file.rs
@@ -0,0 +1,88 @@
+use std::fmt::Display;
+
+use async_trait::async_trait;
+use data_types::{ParquetFile, PartitionId};
+
+use crate::components::file_filter::FileFilter;
+
+use super::PartitionFilter;
+
+#[derive(Debug)]
+pub struct HasMatchingFilePartitionFilter<T>
+where
+ T: FileFilter,
+{
+ filter: T,
+}
+
+impl<T> HasMatchingFilePartitionFilter<T>
+where
+ T: FileFilter,
+{
+ pub fn new(filter: T) -> Self {
+ Self { filter }
+ }
+}
+
+impl<T> Display for HasMatchingFilePartitionFilter<T>
+where
+ T: FileFilter,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "has_matching_file({})", self.filter)
+ }
+}
+
+#[async_trait]
+impl<T> PartitionFilter for HasMatchingFilePartitionFilter<T>
+where
+ T: FileFilter,
+{
+ async fn apply(&self, _partition_id: PartitionId, files: &[ParquetFile]) -> bool {
+ files.iter().any(|file| self.filter.apply(file))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use data_types::CompactionLevel;
+
+ use crate::{
+ components::file_filter::level_range::LevelRangeFileFilter, test_util::ParquetFileBuilder,
+ };
+
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ let filter = HasMatchingFilePartitionFilter::new(LevelRangeFileFilter::new(
+ CompactionLevel::Initial..=CompactionLevel::FileNonOverlapped,
+ ));
+ assert_eq!(filter.to_string(), "has_matching_file(level_range(0..=1))");
+ }
+
+ #[tokio::test]
+ async fn test_apply() {
+ let filter = HasMatchingFilePartitionFilter::new(LevelRangeFileFilter::new(
+ CompactionLevel::Initial..=CompactionLevel::FileNonOverlapped,
+ ));
+ let f1 = ParquetFileBuilder::new(0)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+ let f2 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+
+ // empty
+ assert!(!filter.apply(PartitionId::new(1), &[]).await);
+
+ // all matching
+ assert!(filter.apply(PartitionId::new(1), &[f1.clone()]).await);
+
+ // none matching
+ assert!(!filter.apply(PartitionId::new(1), &[f2.clone()]).await);
+
+ // some matching
+ assert!(filter.apply(PartitionId::new(1), &[f1, f2]).await);
+ }
+}
diff --git a/compactor2/src/components/partition_filter/mod.rs b/compactor2/src/components/partition_filter/mod.rs
index ab91b2707b..1ccafe18d4 100644
--- a/compactor2/src/components/partition_filter/mod.rs
+++ b/compactor2/src/components/partition_filter/mod.rs
@@ -5,6 +5,7 @@ use data_types::{ParquetFile, PartitionId};
pub mod and;
pub mod has_files;
+pub mod has_matching_file;
pub mod logging;
pub mod metrics;
pub mod never_skipped;
diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs
index 76dccd8743..35fd6e5fee 100644
--- a/compactor2/src/components/report.rs
+++ b/compactor2/src/components/report.rs
@@ -19,6 +19,8 @@ pub fn log_components(components: &Components) {
df_planner,
df_plan_exec,
parquet_file_sink,
+ round_split,
+ divide_initial,
} = components;
info!(
@@ -33,6 +35,8 @@ pub fn log_components(components: &Components) {
%df_planner,
%df_plan_exec,
%parquet_file_sink,
+ %round_split,
+ %divide_initial,
"component setup",
);
}
diff --git a/compactor2/src/components/round_split/all_now.rs b/compactor2/src/components/round_split/all_now.rs
new file mode 100644
index 0000000000..4287d8cedb
--- /dev/null
+++ b/compactor2/src/components/round_split/all_now.rs
@@ -0,0 +1,54 @@
+use std::fmt::Display;
+
+use data_types::ParquetFile;
+
+use super::RoundSplit;
+
+#[derive(Debug, Default)]
+pub struct AllNowRoundSplit;
+
+impl AllNowRoundSplit {
+ pub fn new() -> Self {
+ Self::default()
+ }
+}
+
+impl Display for AllNowRoundSplit {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "now")
+ }
+}
+
+impl RoundSplit for AllNowRoundSplit {
+ fn split(&self, files: Vec<ParquetFile>) -> (Vec<ParquetFile>, Vec<ParquetFile>) {
+ (files, vec![])
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::test_util::ParquetFileBuilder;
+
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ assert_eq!(AllNowRoundSplit::new().to_string(), "now");
+ }
+
+ #[test]
+ fn test_split() {
+ let split = AllNowRoundSplit::new();
+
+ // empty input
+ assert_eq!(split.split(vec![]), (vec![], vec![]));
+
+ // not empty
+ let f1 = ParquetFileBuilder::new(1).build();
+ let f2 = ParquetFileBuilder::new(2).build();
+ assert_eq!(
+ split.split(vec![f1.clone(), f2.clone()]),
+ (vec![f1, f2], vec![])
+ );
+ }
+}
diff --git a/compactor2/src/components/round_split/mod.rs b/compactor2/src/components/round_split/mod.rs
new file mode 100644
index 0000000000..02809d115f
--- /dev/null
+++ b/compactor2/src/components/round_split/mod.rs
@@ -0,0 +1,15 @@
+use std::fmt::{Debug, Display};
+
+use data_types::ParquetFile;
+
+pub mod all_now;
+
+pub trait RoundSplit: Debug + Display + Send + Sync {
+ /// Split files into two buckets "now" and "later".
+ ///
+ /// All files belong to the same partition.
+ ///
+ /// - **now:** will be processed in this round
+ /// - **later:** will be processed in the next round
+ fn split(&self, files: Vec<ParquetFile>) -> (Vec<ParquetFile>, Vec<ParquetFile>);
+}
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index f9417d5a19..c2431a0326 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -1,7 +1,8 @@
-use std::{num::NonZeroUsize, sync::Arc, time::Duration};
+use std::{future::Future, num::NonZeroUsize, sync::Arc, time::Duration};
-use data_types::{CompactionLevel, PartitionId};
-use futures::{stream::FuturesOrdered, StreamExt, TryStreamExt};
+use data_types::{CompactionLevel, ParquetFile, ParquetFileParams, PartitionId};
+use datafusion::physical_plan::SendableRecordBatchStream;
+use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt, TryStreamExt};
use tracker::InstrumentedAsyncSemaphore;
use crate::{components::Components, partition_info::PartitionInfo};
@@ -70,57 +71,82 @@ async fn try_compact_partition(
job_semaphore: Arc<InstrumentedAsyncSemaphore>,
components: Arc<Components>,
) -> Result<(), Error> {
- let files = components.partition_files_source.fetch(partition_id).await;
- let files = components.files_filter.apply(files);
- let delete_ids = files.iter().map(|f| f.id).collect::<Vec<_>>();
+ let mut files = components.partition_files_source.fetch(partition_id).await;
- if !components
- .partition_filter
- .apply(partition_id, &files)
- .await
- {
- return Ok(());
- }
+ // fetch partition info only if we need it
+ let mut lazy_partition_info = None;
- let partition_info = fetch_partition_info(partition_id, &components).await?;
-
- // TODO: Need a wraper funtion to:
- // . split files into L0, L1 and L2
- // . identify right files for hot/cold compaction
- // . filter right amount of files
- // . compact many steps hot/cold (need more thinking)
- let target_level = CompactionLevel::FileNonOverlapped;
- let plan = components
- .df_planner
- .plan(files, Arc::clone(&partition_info), target_level)
- .await?;
- let streams = components.df_plan_exec.exec(plan);
- let job = streams
- .into_iter()
- .map(|stream| {
- components
- .parquet_file_sink
- .store(stream, Arc::clone(&partition_info), target_level)
- })
- // NB: FuturesOrdered allows the futures to run in parallel
- .collect::<FuturesOrdered<_>>()
- // Discard the streams that resulted in empty output / no file uploaded
- // to the object store.
- .try_filter_map(|v| futures::future::ready(Ok(v)))
- // Collect all the persisted parquet files together.
- .try_collect::<Vec<_>>();
+ loop {
+ files = components.files_filter.apply(files);
- let create = {
- let _permit = job_semaphore
- .acquire(None)
+ if !components
+ .partition_filter
+ .apply(partition_id, &files)
.await
- .expect("semaphore not closed");
- job.await?
- };
-
- components.commit.commit(&delete_ids, &create).await;
-
- Ok(())
+ {
+ return Ok(());
+ }
+
+ // fetch partition info
+ if lazy_partition_info.is_none() {
+ lazy_partition_info = Some(fetch_partition_info(partition_id, &components).await?);
+ }
+ let partition_info = lazy_partition_info.as_ref().expect("just fetched");
+
+ let (files_now, files_later) = components.round_split.split(files);
+
+ let mut branches = components.divide_initial.divide(files_now);
+
+ let mut files_next = files_later;
+ while let Some(branch) = branches.pop() {
+ let delete_ids = branch.iter().map(|f| f.id).collect::<Vec<_>>();
+
+ let create = {
+ // draw semaphore BEFORE creating the DataFusion plan and drop it directly AFTER finishing the
+ // DataFusion computation (but BEFORE doing any additional external IO).
+ //
+ // We guard the DataFusion planning (that doesn't perform any IO) via the semaphore as well in case
+ // DataFusion ever starts to pre-allocate buffers during the physical planning. To the best of our
+ // knowledge, this is currently (2023-01-25) not the case but if this ever changes, then we are prepared.
+ let _permit = job_semaphore
+ .acquire(None)
+ .await
+ .expect("semaphore not closed");
+
+ // TODO: Need a wraper funtion to:
+ // . split files into L0, L1 and L2
+ // . identify right files for hot/cold compaction
+ // . filter right amount of files
+ // . compact many steps hot/cold (need more thinking)
+ let target_level = CompactionLevel::FileNonOverlapped;
+ let plan = components
+ .df_planner
+ .plan(branch, Arc::clone(partition_info), target_level)
+ .await?;
+ let streams = components.df_plan_exec.exec(plan);
+ let job = stream_into_file_sink(
+ streams,
+ Arc::clone(partition_info),
+ target_level,
+ Arc::clone(&components),
+ );
+
+ // TODO: react to OOM and try to divide branch
+ job.await?
+ };
+
+ let ids = components.commit.commit(&delete_ids, &create).await;
+
+ files_next.extend(
+ create
+ .into_iter()
+ .zip(ids)
+ .map(|(params, id)| ParquetFile::from_params(params, id)),
+ );
+ }
+
+ files = files_next;
+ }
}
async fn fetch_partition_info(
@@ -170,3 +196,31 @@ async fn fetch_partition_info(
partition_key: partition.partition_key,
}))
}
+
+fn stream_into_file_sink(
+ streams: Vec<SendableRecordBatchStream>,
+ partition_info: Arc<PartitionInfo>,
+ target_level: CompactionLevel,
+ components: Arc<Components>,
+) -> impl Future<Output = Result<Vec<ParquetFileParams>, Error>> {
+ streams
+ .into_iter()
+ .map(move |stream| {
+ let components = Arc::clone(&components);
+ let partition_info = Arc::clone(&partition_info);
+ async move {
+ components
+ .parquet_file_sink
+ .store(stream, partition_info, target_level)
+ .await
+ }
+ })
+ // NB: FuturesOrdered allows the futures to run in parallel
+ .collect::<FuturesOrdered<_>>()
+ // Discard the streams that resulted in empty output / no file uploaded
+ // to the object store.
+ .try_filter_map(|v| futures::future::ready(Ok(v)))
+ // Collect all the persisted parquet files together.
+ .try_collect::<Vec<_>>()
+ .map_err(|e| Box::new(e) as _)
+}
|
0937615dbaf296c4e0cbbfb8dbceafdb84791b01
|
Christopher M. Wolff
|
2023-04-12 14:41:11
|
make interpolate() fill null values in input (#7490)
|
* fix: make interpolate() fill null values in input
* chore: cargo doc
| null |
fix: make interpolate() fill null values in input (#7490)
* fix: make interpolate() fill null values in input
* chore: cargo doc
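The fill itself is ordinary linear interpolation between the two non-null points that bracket the gap. Independent of the Cursor/Segment machinery in the diff, the arithmetic is just the following sketch (interpolate_f64 is a standalone illustration, not a function from the crate):

/// Sketch of the interpolation arithmetic: given the bracketing non-null points
/// (t0, y0) and (t1, y1), compute the value at a timestamp ts between them.
fn interpolate_f64(start: (i64, f64), end: (i64, f64), ts: i64) -> f64 {
    let (t0, y0) = start;
    let (t1, y1) = end;
    y0 + (y1 - y0) * (ts - t0) as f64 / (t1 - t0) as f64
}

fn main() {
    // Matches the updated test expectations above: the segment from
    // (1400, 200) to (1700, 1000) yields ~466.7 at 1500 and ~733.3 at 1600,
    // which the integer column shows as 466 and 733.
    assert!((interpolate_f64((1400, 200.0), (1700, 1000.0), 1500) - 466.66).abs() < 0.1);
    assert!((interpolate_f64((1400, 200.0), (1700, 1000.0), 1600) - 733.33).abs() < 0.1);
}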
|
diff --git a/iox_query/src/exec/gapfill/algo/interpolate.rs b/iox_query/src/exec/gapfill/algo/interpolate.rs
index 79955012f8..2ed1d49379 100644
--- a/iox_query/src/exec/gapfill/algo/interpolate.rs
+++ b/iox_query/src/exec/gapfill/algo/interpolate.rs
@@ -90,7 +90,6 @@ impl Cursor {
.map(|seg| Segment::<T::Native>::try_from(seg.clone()))
.transpose()?;
let mut builder = InterpolateBuilder {
- params,
values: Vec::with_capacity(self.remaining_output_batch_size),
segment,
input_time_array,
@@ -173,7 +172,6 @@ impl_from_segment_scalar_value!(f64);
/// Implements [`VecBuilder`] for build aggregate columns whose gaps
/// are being filled using linear interpolation.
pub(super) struct InterpolateBuilder<'a, T: ArrowPrimitiveType> {
- pub params: &'a GapFillParams,
pub values: Vec<Option<T::Native>>,
pub segment: Option<Segment<T::Native>>,
pub input_time_array: &'a TimestampNanosecondArray,
@@ -193,27 +191,25 @@ where
offset,
series_end_offset,
} => {
- // If
- // we are not at the last point
- // and the distance to the next point is greater than the stride
- // and both this point and the next are not null
- // then create a segment that will be used to fill in the missing rows.
- if offset + 1 < series_end_offset
- && self.input_time_array.value(offset + 1) > ts + self.params.stride
- && self.input_aggr_array.is_valid(offset)
- && self.input_aggr_array.is_valid(offset + 1)
- {
- self.segment = Some(Segment {
+ if self.input_aggr_array.is_valid(offset) {
+ let end_offset = self.find_end_offset(offset, series_end_offset);
+ // Find the next non-null value in this column for the series.
+ // If there is one, start a new segment at the current value.
+ self.segment = end_offset.map(|end_offset| Segment {
start_point: (ts, self.input_aggr_array.value(offset)),
end_point: (
- self.input_time_array.value(offset + 1),
- self.input_aggr_array.value(offset + 1),
+ self.input_time_array.value(end_offset),
+ self.input_aggr_array.value(end_offset),
),
- })
+ });
+ self.copy_point(offset);
} else {
- self.segment = None;
+ self.values.push(
+ self.segment
+ .as_ref()
+ .map(|seg| T::Native::interpolate(seg, ts)),
+ );
}
- self.copy_point(offset);
}
RowStatus::Missing { ts, .. } => self.values.push(
self.segment
@@ -243,6 +239,17 @@ where
.then_some(self.input_aggr_array.value(offset));
self.values.push(v)
}
+
+ /// Scan forward to find the endpoint for a segment that starts at `start_offset`.
+ /// Skip over any null values.
+ ///
+ /// We are guaranteed to have buffered enough input to find the next non-null point for this series,
+ /// if there is one, by the logic in [`BufferedInput`].
+ ///
+ /// [`BufferedInput`]: super::super::buffered_input::BufferedInput
+ fn find_end_offset(&self, start_offset: usize, series_end_offset: usize) -> Option<usize> {
+ ((start_offset + 1)..series_end_offset).find(|&i| self.input_aggr_array.is_valid(i))
+ }
}
/// A trait for the native numeric types that can be interpolated
@@ -375,8 +382,8 @@ mod test {
- "| 1970-01-01T00:00:00.000001200Z | 133 |"
- "| 1970-01-01T00:00:00.000001300Z | 166 |"
- "| 1970-01-01T00:00:00.000001400Z | 200 |"
- - "| 1970-01-01T00:00:00.000001500Z | |"
- - "| 1970-01-01T00:00:00.000001600Z | |"
+ - "| 1970-01-01T00:00:00.000001500Z | 466 |"
+ - "| 1970-01-01T00:00:00.000001600Z | 733 |"
- "| 1970-01-01T00:00:00.000001700Z | 1000 |"
- "| 1970-01-01T00:00:00.000001800Z | 500 |"
- "| 1970-01-01T00:00:00.000001900Z | 0 |"
@@ -447,8 +454,8 @@ mod test {
- "| 1970-01-01T00:00:00.000001200Z | 133 |"
- "| 1970-01-01T00:00:00.000001300Z | 166 |"
- "| 1970-01-01T00:00:00.000001400Z | 200 |"
- - "| 1970-01-01T00:00:00.000001500Z | |"
- - "| 1970-01-01T00:00:00.000001600Z | |"
+ - "| 1970-01-01T00:00:00.000001500Z | 466 |"
+ - "| 1970-01-01T00:00:00.000001600Z | 733 |"
- "| 1970-01-01T00:00:00.000001700Z | 1000 |"
- "| 1970-01-01T00:00:00.000001800Z | 500 |"
- "| 1970-01-01T00:00:00.000001900Z | 0 |"
@@ -519,8 +526,8 @@ mod test {
- "| 1970-01-01T00:00:00.000001200Z | 200.0 |"
- "| 1970-01-01T00:00:00.000001300Z | 300.0 |"
- "| 1970-01-01T00:00:00.000001400Z | 400.0 |"
- - "| 1970-01-01T00:00:00.000001500Z | |"
- - "| 1970-01-01T00:00:00.000001600Z | |"
+ - "| 1970-01-01T00:00:00.000001500Z | 600.0 |"
+ - "| 1970-01-01T00:00:00.000001600Z | 800.0 |"
- "| 1970-01-01T00:00:00.000001700Z | 1000.0 |"
- "| 1970-01-01T00:00:00.000001800Z | 500.0 |"
- "| 1970-01-01T00:00:00.000001900Z | 0.0 |"
diff --git a/iox_query/src/exec/gapfill/exec_tests.rs b/iox_query/src/exec/gapfill/exec_tests.rs
index e33db776ff..bcb674831f 100644
--- a/iox_query/src/exec/gapfill/exec_tests.rs
+++ b/iox_query/src/exec/gapfill/exec_tests.rs
@@ -775,6 +775,7 @@ fn test_gapfill_fill_interpolate() {
Some("b"),
Some("b"),
Some("b"),
+ Some("b"),
]],
time_col: vec![
None,
@@ -788,7 +789,7 @@ fn test_gapfill_fill_interpolate() {
// --- new series
None,
Some(975),
- // 1000
+ Some(1000),
Some(1025),
// 1050
Some(1075),
@@ -807,7 +808,7 @@ fn test_gapfill_fill_interpolate() {
// --- new series
Some(-10),
Some(1100), // 975
- // 1200 1000
+ None, // 1200 1000 (this null value will be filled)
Some(1300), // 1025
// 1325 1050
Some(1350), // 1075
|
a0b0bb0a93689ee1e2f1bd7f07d5e29f3ec902ab
|
Dom Dwyer
|
2023-02-01 16:24:52
|
fatal panics for ingester2, not ingester
|
I put the calls in the wrong "ingester".
| null |
refactor: fatal panics for ingester2, not ingester
I put the calls in the wrong "ingester".
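For context, "fatal" here means the process aborts instead of unwinding past a panic and limping on. A minimal sketch of that behaviour is below; the real panic_logging::make_panics_fatal may differ in detail, this only illustrates the idea.

use std::panic;
use std::process;

/// Sketch only: chain a panic hook that runs the previously installed hook
/// (e.g. the logging hook) and then aborts, so any panic takes the server down.
fn make_panics_fatal_sketch() {
    let previous = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        previous(info);   // keep the existing logging behaviour
        process::abort(); // never continue after a panic in this server mode
    }));
}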
|
diff --git a/influxdb_iox/src/commands/run/ingester.rs b/influxdb_iox/src/commands/run/ingester.rs
index 02ada6e667..19e9c03410 100644
--- a/influxdb_iox/src/commands/run/ingester.rs
+++ b/influxdb_iox/src/commands/run/ingester.rs
@@ -13,7 +13,6 @@ use ioxd_ingester::create_ingester_server_type;
use object_store::DynObjectStore;
use object_store_metrics::ObjectStoreMetrics;
use observability_deps::tracing::*;
-use panic_logging::make_panics_fatal;
use std::sync::Arc;
use thiserror::Error;
@@ -95,9 +94,6 @@ pub async fn command(config: Config) -> Result<()> {
);
}
- // Ensure panics are fatal when running in this server mode.
- make_panics_fatal();
-
let common_state = CommonServerState::from_config(config.run_config.clone())?;
let time_provider = Arc::new(SystemProvider::new()) as Arc<dyn TimeProvider>;
diff --git a/influxdb_iox/src/commands/run/ingester2.rs b/influxdb_iox/src/commands/run/ingester2.rs
index ae0ffbd96a..632be386f0 100644
--- a/influxdb_iox/src/commands/run/ingester2.rs
+++ b/influxdb_iox/src/commands/run/ingester2.rs
@@ -16,6 +16,7 @@ use ioxd_ingester2::create_ingester_server_type;
use object_store::DynObjectStore;
use object_store_metrics::ObjectStoreMetrics;
use observability_deps::tracing::*;
+use panic_logging::make_panics_fatal;
use parquet_file::storage::{ParquetStorage, StorageId};
use std::sync::Arc;
use thiserror::Error;
@@ -92,6 +93,9 @@ pub async fn command(config: Config) -> Result<()> {
);
}
+ // Ensure panics are fatal when running in this server mode.
+ make_panics_fatal();
+
let common_state = CommonServerState::from_config(config.run_config.clone())?;
let time_provider = Arc::new(SystemProvider::new()) as Arc<dyn TimeProvider>;
let metric_registry = setup_metric_registry();
|
d4715a9fde75430122cb363c5a7601646f48e264
|
Carol (Nichols || Goulding)
|
2022-11-16 17:30:11
|
Simplify tests by using and creating more test helpers
|
The most important part of this is creating the DmlWrites in one spot.
| null |
fix: Simplify tests by using and creating more test helpers
The most important part of this is creating the DmlWrites in one spot.
|
diff --git a/ingester/src/data.rs b/ingester/src/data.rs
index 0d97bbf402..9b16370ed5 100644
--- a/ingester/src/data.rs
+++ b/ingester/src/data.rs
@@ -695,19 +695,19 @@ pub enum DmlApplyAction {
#[cfg(test)]
mod tests {
use super::*;
- use crate::lifecycle::{LifecycleConfig, LifecycleManager};
+ use crate::{
+ lifecycle::{LifecycleConfig, LifecycleManager},
+ test_util::make_write_op,
+ };
use assert_matches::assert_matches;
use data_types::{
- DeletePredicate, Namespace, NamespaceSchema, NonEmptyString, Sequence, Shard, Timestamp,
- TimestampRange,
+ DeletePredicate, Namespace, NamespaceSchema, NonEmptyString, PartitionKey, Sequence, Shard,
+ Table, TimestampRange,
};
use dml::{DmlDelete, DmlMeta, DmlWrite};
use futures::TryStreamExt;
- use hashbrown::HashMap;
- use iox_catalog::{interface::RepoCollection, mem::MemCatalog, validate_or_insert_schema};
+ use iox_catalog::{mem::MemCatalog, validate_or_insert_schema};
use iox_time::Time;
- use mutable_batch::MutableBatch;
- use mutable_batch_lp::lines_to_batches;
use object_store::memory::InMemory;
use schema::sort::SortKey;
use std::{ops::DerefMut, sync::Arc, time::Duration};
@@ -717,7 +717,9 @@ mod tests {
catalog: Arc<dyn Catalog>,
object_store: Arc<DynObjectStore>,
namespace: Namespace,
- schema: NamespaceSchema,
+ table1: Table,
+ table2: Table,
+ partition_key: PartitionKey,
shard1: Shard,
shard2: Shard,
data: Arc<IngesterData>,
@@ -727,8 +729,9 @@ mod tests {
async fn new() -> Self {
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
+ let partition_key = PartitionKey::from("1970-01-01");
- let (namespace, schema, shard1, shard2) = {
+ let (namespace, table1, table2, shard1, shard2) = {
let mut repos = catalog.repositories().await;
let topic = repos.topics().create_or_get("whatevs").await.unwrap();
@@ -739,6 +742,19 @@ mod tests {
.create("foo", topic.id, query_pool.id)
.await
.unwrap();
+
+ let table1 = repos
+ .tables()
+ .create_or_get("mem", namespace.id)
+ .await
+ .unwrap();
+
+ let table2 = repos
+ .tables()
+ .create_or_get("cpu", namespace.id)
+ .await
+ .unwrap();
+
let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let shard_index = ShardIndex::new(0);
@@ -753,7 +769,36 @@ mod tests {
.create_or_get(&topic, shard_index)
.await
.unwrap();
- (namespace, schema, shard1, shard2)
+
+ // Put the columns in the catalog (these writes don't actually get inserted)
+ // This will be different once column IDs are used instead of names
+ let table1_write = Self::arbitrary_write_with_seq_num_at_time(
+ 1,
+ 0,
+ &partition_key,
+ shard_index,
+ namespace.id,
+ &table1,
+ );
+ validate_or_insert_schema(table1_write.tables(), &schema, repos.deref_mut())
+ .await
+ .unwrap()
+ .unwrap();
+
+ let table2_write = Self::arbitrary_write_with_seq_num_at_time(
+ 1,
+ 0,
+ &partition_key,
+ shard_index,
+ namespace.id,
+ &table2,
+ );
+ validate_or_insert_schema(table2_write.tables(), &schema, repos.deref_mut())
+ .await
+ .unwrap()
+ .unwrap();
+
+ (namespace, table1, table2, shard1, shard2)
};
let object_store: Arc<DynObjectStore> = Arc::new(InMemory::new());
@@ -779,51 +824,79 @@ mod tests {
catalog,
object_store,
namespace,
- schema,
+ table1,
+ table2,
+ partition_key,
shard1,
shard2,
data,
}
}
+
+ fn arbitrary_write_with_seq_num_at_time(
+ sequence_number: i64,
+ timestamp: i64,
+ partition_key: &PartitionKey,
+ shard_index: ShardIndex,
+ namespace_id: NamespaceId,
+ table: &Table,
+ ) -> DmlWrite {
+ make_write_op(
+ partition_key,
+ shard_index,
+ namespace_id,
+ &table.name,
+ table.id,
+ sequence_number,
+ &format!(
+ "{} foo=1 {}\n{} foo=2 {}",
+ table.name,
+ timestamp,
+ table.name,
+ timestamp + 10
+ ),
+ )
+ }
+
+ fn arbitrary_write_with_seq_num(&self, table: &Table, sequence_number: i64) -> DmlWrite {
+ Self::arbitrary_write_with_seq_num_at_time(
+ sequence_number,
+ 10,
+ &self.partition_key,
+ self.shard1.shard_index,
+ self.namespace.id,
+ table,
+ )
+ }
+
+ async fn persist_data(&self, table: &Table) {
+ let partition_id = {
+ let sd = self.data.shards.get(&self.shard1.id).unwrap();
+ let n = sd.namespace(self.namespace.id).unwrap();
+ let t = n.table(table.id).unwrap();
+ let p = t
+ .get_partition_by_key(&self.partition_key)
+ .unwrap()
+ .lock()
+ .partition_id();
+ p
+ };
+
+ self.data
+ .persist(self.shard1.id, self.namespace.id, table.id, partition_id)
+ .await;
+ }
}
#[tokio::test]
async fn buffer_write_updates_lifecycle_manager_indicates_pause() {
test_helpers::maybe_start_logging();
- let TestContext {
- metrics,
- catalog,
- namespace,
- schema,
- shard1: shard,
- data,
- ..
- } = TestContext::new().await;
- let mut repos = catalog.repositories().await;
+ let ctx = TestContext::new().await;
+ let shard = &ctx.shard1;
- let ignored_ts = Time::from_timestamp_millis(42).unwrap();
+ let w1 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 1);
- let batch = lines_to_batches("mem foo=1 10", 0).unwrap();
- let w1 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(1)),
- ignored_ts,
- None,
- 50,
- ),
- );
-
- let _ = validate_or_insert_schema(w1.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
-
- std::mem::drop(repos);
let pause_size = w1.size() + 1;
let manager = LifecycleManager::new(
LifecycleConfig::new(
@@ -834,30 +907,21 @@ mod tests {
Duration::from_secs(1),
1000000,
),
- metrics,
+ Arc::clone(&ctx.metrics),
Arc::new(SystemProvider::new()),
);
- let action = data
- .buffer_operation(shard.id, DmlOperation::Write(w1.clone()), &manager.handle())
+
+ let action = ctx
+ .data
+ .buffer_operation(shard.id, DmlOperation::Write(w1), &manager.handle())
.await
.unwrap();
assert_matches!(action, DmlApplyAction::Applied(false));
- let batch = lines_to_batches("mem foo=1 10", 0).unwrap();
- let w2 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(&mut *catalog.repositories().await, namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(2)),
- ignored_ts,
- None,
- 50,
- ),
- );
+ let w2 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 2);
- let action = data
+ let action = ctx
+ .data
.buffer_operation(shard.id, DmlOperation::Write(w2), &manager.handle())
.await
.unwrap();
@@ -867,39 +931,11 @@ mod tests {
#[tokio::test]
async fn persist_row_count_trigger() {
test_helpers::maybe_start_logging();
- let TestContext {
- metrics,
- catalog,
- object_store,
- namespace,
- schema,
- shard1: shard,
- data,
- ..
- } = TestContext::new().await;
-
- let mut repos = catalog.repositories().await;
-
- let batch = lines_to_batches("mem foo=1 10\nmem foo=1 11", 0).unwrap();
- let w1 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(1)),
- Time::from_timestamp_millis(42).unwrap(),
- None,
- 50,
- ),
- );
- let _schema = validate_or_insert_schema(w1.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
- // drop repos so the mem catalog won't deadlock.
- std::mem::drop(repos);
+ let ctx = TestContext::new().await;
+ let shard = &ctx.shard1;
+
+ let w1 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 1);
let manager = LifecycleManager::new(
LifecycleConfig::new(
@@ -910,11 +946,12 @@ mod tests {
Duration::from_secs(1),
1, // This row count will be hit
),
- Arc::clone(&metrics),
+ Arc::clone(&ctx.metrics),
Arc::new(SystemProvider::new()),
);
- let action = data
+ let action = ctx
+ .data
.buffer_operation(shard.id, DmlOperation::Write(w1), &manager.handle())
.await
.unwrap();
@@ -922,33 +959,11 @@ mod tests {
// limits)
assert_matches!(action, DmlApplyAction::Applied(false));
- let table_id = catalog
- .repositories()
- .await
- .tables()
- .get_by_namespace_and_name(namespace.id, "mem")
- .await
- .unwrap()
- .unwrap()
- .id;
-
- let partition_id = {
- let sd = data.shards.get(&shard.id).unwrap();
- let n = sd.namespace(namespace.id).unwrap();
- let mem_table = n.table(table_id).unwrap();
- let p = mem_table
- .get_partition_by_key(&"1970-01-01".into())
- .unwrap()
- .lock()
- .partition_id();
- p
- };
-
- data.persist(shard.id, namespace.id, table_id, partition_id)
- .await;
+ ctx.persist_data(&ctx.table1).await;
// verify that a file got put into object store
- let file_paths: Vec<_> = object_store
+ let file_paths: Vec<_> = ctx
+ .object_store
.list(None)
.await
.unwrap()
@@ -961,72 +976,17 @@ mod tests {
#[tokio::test]
async fn persist() {
test_helpers::maybe_start_logging();
- let TestContext {
- metrics,
- catalog,
- object_store,
- namespace,
- schema,
- shard1,
- shard2,
- data,
- } = TestContext::new().await;
-
- let mut repos = catalog.repositories().await;
-
- let ignored_ts = Time::from_timestamp_millis(42).unwrap();
-
- let batch = lines_to_batches("mem foo=1 10", 0).unwrap();
- let w1 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(1)),
- ignored_ts,
- None,
- 50,
- ),
- );
- let schema = validate_or_insert_schema(w1.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
-
- let batch = lines_to_batches("cpu foo=1 10", 1).unwrap();
- let w2 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(2), SequenceNumber::new(1)),
- ignored_ts,
- None,
- 50,
- ),
- );
- let _ = validate_or_insert_schema(w2.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
-
- // drop repos so the mem catalog won't deadlock.
- std::mem::drop(repos);
- let batch = lines_to_batches("mem foo=1 30", 2).unwrap();
- let w3 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(&mut *catalog.repositories().await, namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(2)),
- ignored_ts,
- None,
- 50,
- ),
- );
+ let ctx = TestContext::new().await;
+ let namespace = &ctx.namespace;
+ let shard1 = &ctx.shard1;
+ let shard2 = &ctx.shard2;
+ let data = &ctx.data;
+
+ let w1 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 1);
+ // different table as w1, same sequence number
+ let w2 = ctx.arbitrary_write_with_seq_num(&ctx.table2, 1);
+ // same table as w1, next sequence number
+ let w3 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 2);
let manager = LifecycleManager::new(
LifecycleConfig::new(
@@ -1037,7 +997,7 @@ mod tests {
Duration::from_secs(1),
1000000,
),
- Arc::clone(&metrics),
+ Arc::clone(&ctx.metrics),
Arc::new(SystemProvider::new()),
);
@@ -1054,23 +1014,13 @@ mod tests {
let expected_progress = ShardProgress::new()
.with_buffered(SequenceNumber::new(1))
.with_buffered(SequenceNumber::new(2));
- assert_progress(&data, shard1.shard_index, expected_progress).await;
-
- let table_id = catalog
- .repositories()
- .await
- .tables()
- .get_by_namespace_and_name(namespace.id, "mem")
- .await
- .unwrap()
- .unwrap()
- .id;
+ assert_progress(data, shard1.shard_index, expected_progress).await;
let sd = data.shards.get(&shard1.id).unwrap();
let n = sd.namespace(namespace.id).unwrap();
let partition_id;
{
- let mem_table = n.table(table_id).unwrap();
+ let mem_table = n.table(ctx.table1.id).unwrap();
let p = mem_table
.get_partition_by_key(&"1970-01-01".into())
@@ -1079,7 +1029,7 @@ mod tests {
}
{
// verify the partition doesn't have a sort key before any data has been persisted
- let mut repos = catalog.repositories().await;
+ let mut repos = ctx.catalog.repositories().await;
let partition_info = repos
.partitions()
.get_by_id(partition_id)
@@ -1089,11 +1039,12 @@ mod tests {
assert!(partition_info.sort_key.is_empty());
}
- data.persist(shard1.id, namespace.id, table_id, partition_id)
+ data.persist(shard1.id, namespace.id, ctx.table1.id, partition_id)
.await;
// verify that a file got put into object store
- let file_paths: Vec<_> = object_store
+ let file_paths: Vec<_> = ctx
+ .object_store
.list(None)
.await
.unwrap()
@@ -1102,7 +1053,7 @@ mod tests {
.unwrap();
assert_eq!(file_paths.len(), 1);
- let mut repos = catalog.repositories().await;
+ let mut repos = ctx.catalog.repositories().await;
// verify it put the record in the catalog
let parquet_files = repos
.parquet_files()
@@ -1112,9 +1063,7 @@ mod tests {
assert_eq!(parquet_files.len(), 1);
let pf = parquet_files.first().unwrap();
assert_eq!(pf.partition_id, partition_id);
- assert_eq!(pf.table_id, table_id);
- assert_eq!(pf.min_time, Timestamp::new(10));
- assert_eq!(pf.max_time, Timestamp::new(30));
+ assert_eq!(pf.table_id, ctx.table1.id);
assert_eq!(pf.max_sequence_number, SequenceNumber::new(2));
assert_eq!(pf.shard_id, shard1.id);
assert!(pf.to_delete.is_none());
@@ -1143,7 +1092,7 @@ mod tests {
.unwrap()
.namespace(namespace.id)
.unwrap()
- .table(table_id)
+ .table(ctx.table1.id)
.unwrap()
.get_partition(partition_id)
.unwrap()
@@ -1176,7 +1125,8 @@ mod tests {
);
// verify metrics
- let persisted_file_size_bytes: Metric<U64Histogram> = metrics
+ let persisted_file_size_bytes: Metric<U64Histogram> = ctx
+ .metrics
.get_instrument("ingester_persisted_file_size_bytes")
.unwrap();
@@ -1196,7 +1146,7 @@ mod tests {
// Only the < 500 KB bucket has a count
assert_eq!(buckets_with_counts, &[500 * 1024]);
- let mem_table = n.table(table_id).unwrap();
+ let mem_table = n.table(ctx.table1.id).unwrap();
// verify that the parquet_max_sequence_number got updated
assert_eq!(
@@ -1212,65 +1162,20 @@ mod tests {
let expected_progress = ShardProgress::new()
.with_buffered(SequenceNumber::new(1))
.with_persisted(SequenceNumber::new(2));
- assert_progress(&data, shard1.shard_index, expected_progress).await;
+ assert_progress(data, shard1.shard_index, expected_progress).await;
}
#[tokio::test]
async fn partial_write_progress() {
test_helpers::maybe_start_logging();
- let TestContext {
- metrics,
- catalog,
- namespace,
- schema,
- shard1,
- data,
- ..
- } = TestContext::new().await;
-
- let mut repos = catalog.repositories().await;
-
- let ignored_ts = Time::from_timestamp_millis(42).unwrap();
-
- // write with sequence number 1
- let batch = lines_to_batches("mem foo=1 10", 0).unwrap();
- let w1 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(1)),
- ignored_ts,
- None,
- 50,
- ),
- );
- let _ = validate_or_insert_schema(w1.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
+ let ctx = TestContext::new().await;
+ let namespace = &ctx.namespace;
+ let shard1 = &ctx.shard1;
+ let data = &ctx.data;
+ let w1 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 1);
// write with sequence number 2
- let batch = lines_to_batches("mem foo=1 30\ncpu bar=1 20", 0).unwrap();
- let w2 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(2)),
- ignored_ts,
- None,
- 50,
- ),
- );
- let _ = validate_or_insert_schema(w2.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
-
- drop(repos); // release catalog transaction
+ let w2 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 2);
let manager = LifecycleManager::new(
LifecycleConfig::new(
@@ -1281,7 +1186,7 @@ mod tests {
Duration::from_secs(1),
1000000,
),
- metrics,
+ Arc::clone(&ctx.metrics),
Arc::new(SystemProvider::new()),
);
@@ -1295,16 +1200,17 @@ mod tests {
let n = sd.namespace(namespace.id).unwrap();
let expected_progress = ShardProgress::new().with_buffered(SequenceNumber::new(1));
- assert_progress(&data, shard1.shard_index, expected_progress).await;
+ assert_progress(data, shard1.shard_index, expected_progress).await;
// configure the the namespace to wait after each insert.
n.test_triggers.enable_pause_after_write().await;
// now, buffer operation 2 which has two tables,
- let captured_data = Arc::clone(&data);
+ let captured_data = Arc::clone(data);
+ let shard1_id = shard1.id;
let task = tokio::task::spawn(async move {
captured_data
- .buffer_operation(shard1.id, DmlOperation::Write(w2), &manager.handle())
+ .buffer_operation(shard1_id, DmlOperation::Write(w2), &manager.handle())
.await
.unwrap();
});
@@ -1316,7 +1222,7 @@ mod tests {
let expected_progress = ShardProgress::new()
// sequence 2 hasn't been buffered yet
.with_buffered(SequenceNumber::new(1));
- assert_progress(&data, shard1.shard_index, expected_progress).await;
+ assert_progress(data, shard1.shard_index, expected_progress).await;
// allow the write to complete
n.test_triggers.release_pause_after_write().await;
@@ -1326,71 +1232,18 @@ mod tests {
let expected_progress = ShardProgress::new()
.with_buffered(SequenceNumber::new(1))
.with_buffered(SequenceNumber::new(2));
- assert_progress(&data, shard1.shard_index, expected_progress).await;
+ assert_progress(data, shard1.shard_index, expected_progress).await;
}
#[tokio::test]
async fn buffer_deletes_updates_tombstone_watermark() {
- let metrics = Arc::new(metric::Registry::new());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
- let mut repos = catalog.repositories().await;
- let topic = repos.topics().create_or_get("whatevs").await.unwrap();
- let query_pool = repos.query_pools().create_or_get("whatevs").await.unwrap();
- let shard_index = ShardIndex::new(0);
- let namespace = repos
- .namespaces()
- .create("foo", topic.id, query_pool.id)
- .await
- .unwrap();
- let shard1 = repos
- .shards()
- .create_or_get(&topic, shard_index)
- .await
- .unwrap();
- let shard_index = ShardIndex::new(0);
-
- let object_store: Arc<DynObjectStore> = Arc::new(InMemory::new());
-
- drop(repos); // test catalog deadlock
- let data = Arc::new(
- IngesterData::new(
- Arc::clone(&object_store),
- Arc::clone(&catalog),
- [(shard1.id, shard_index)],
- Arc::new(Executor::new(1)),
- BackoffConfig::default(),
- Arc::clone(&metrics),
- )
- .await
- .expect("failed to initialise ingester"),
- );
-
- let mut repos = catalog.repositories().await;
-
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
-
- let ignored_ts = Time::from_timestamp_millis(42).unwrap();
-
- let batch = lines_to_batches("mem foo=1 10", 0).unwrap();
- let w1 = DmlWrite::new(
- namespace.id,
- batch.clone(),
- build_id_map(repos.deref_mut(), namespace.id, &batch).await,
- "1970-01-01".into(),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(1), SequenceNumber::new(1)),
- ignored_ts,
- None,
- 50,
- ),
- );
+ test_helpers::maybe_start_logging();
+ let ctx = TestContext::new().await;
+ let shard1 = &ctx.shard1;
+ let data = &ctx.data;
- let _ = validate_or_insert_schema(w1.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
+ let w1 = ctx.arbitrary_write_with_seq_num(&ctx.table1, 1);
- std::mem::drop(repos);
let pause_size = w1.size() + 1;
let manager = LifecycleManager::new(
LifecycleConfig::new(
@@ -1401,25 +1254,23 @@ mod tests {
Duration::from_secs(1),
1000000,
),
- metrics,
+ Arc::clone(&ctx.metrics),
Arc::new(SystemProvider::new()),
);
- data.buffer_operation(
- shard1.id,
- DmlOperation::Write(w1.clone()),
- &manager.handle(),
- )
- .await
- .unwrap();
+
+ data.buffer_operation(shard1.id, DmlOperation::Write(w1), &manager.handle())
+ .await
+ .unwrap();
let predicate = DeletePredicate {
range: TimestampRange::new(1, 2),
exprs: vec![],
};
+ let ignored_ts = Time::from_timestamp_millis(42).unwrap();
let d1 = DmlDelete::new(
NamespaceId::new(42),
predicate,
- Some(NonEmptyString::new("mem").unwrap()),
+ Some(NonEmptyString::new(&ctx.table1.name).unwrap()),
DmlMeta::sequenced(
Sequence::new(ShardIndex::new(1), SequenceNumber::new(2)),
ignored_ts,
@@ -1445,28 +1296,4 @@ mod tests {
assert_eq!(progresses, expected_progresses);
}
-
- pub async fn build_id_map<R>(
- catalog: &mut R,
- namespace_id: NamespaceId,
- tables: &HashMap<String, MutableBatch>,
- ) -> HashMap<String, TableId>
- where
- R: RepoCollection + ?Sized,
- {
- let mut ret = HashMap::with_capacity(tables.len());
-
- for k in tables.keys() {
- let id = catalog
- .tables()
- .create_or_get(k, namespace_id)
- .await
- .expect("table should create OK")
- .id;
-
- ret.insert(k.clone(), id);
- }
-
- ret
- }
}
|
52ac1b97a9ac869ab38af20df5b7320e4ff54963
|
Dom Dwyer
|
2023-02-01 14:37:21
|
namespace retention protobuf mappings
|
Document that the caller can specify 0 or NULL for an infinite retention
period, and that IOx will respond with NULL.
Document that negative retention periods are rejected.
| null |
docs: namespace retention protobuf mappings
Document that the caller can specify 0 or NULL for an infinite retention
period, and that IOx will respond with NULL.
Document that negative retention periods are rejected.
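Expressed as code, the documented request-side mapping is small. This is an illustrative sketch rather than the service's actual validation routine, and the string error stands in for whatever status the gRPC handler really returns:

/// Sketch of the documented mapping for retention_period_ns on requests:
/// NULL and 0 both mean "infinite retention" (stored as NULL); negative
/// values are rejected.
fn map_retention_period_ns(value: Option<i64>) -> Result<Option<i64>, String> {
    match value {
        None | Some(0) => Ok(None), // infinite retention
        Some(v) if v < 0 => Err(format!("invalid retention period: {v}")),
        Some(v) => Ok(Some(v)), // finite retention, in nanoseconds
    }
}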
|
diff --git a/generated_types/protos/influxdata/iox/namespace/v1/service.proto b/generated_types/protos/influxdata/iox/namespace/v1/service.proto
index e68f8a9925..83885eede1 100644
--- a/generated_types/protos/influxdata/iox/namespace/v1/service.proto
+++ b/generated_types/protos/influxdata/iox/namespace/v1/service.proto
@@ -27,7 +27,10 @@ message CreateNamespaceRequest {
// Name of the namespace to be created
string name = 1;
- // Retention period ns
+ // Retention period in nanoseconds.
+ //
+ // NULL means "infinite retention", and 0 is mapped to NULL. Negative values
+ // are rejected.
optional int64 retention_period_ns = 2;
}
@@ -47,7 +50,10 @@ message UpdateNamespaceRetentionRequest {
// Name of the namespace to be set
string name = 1;
- // Retention period ns
+ // Retention period in nanoseconds.
+ //
+ // NULL means "infinite retention", and 0 is mapped to NULL. Negative values
+ // are rejected.
optional int64 retention_period_ns = 2;
}
@@ -62,6 +68,8 @@ message Namespace {
// Name of the Namespace
string name = 2;
- // Retention period ns
+ // Retention period in nanoseconds.
+ //
+ // NULL means "infinite retention".
optional int64 retention_period_ns = 3;
}
|
ce77d3bd740629ab41ee12d5f403628c66621b52
|
Andrew Lamb
|
2023-03-24 16:20:28
|
Build `GetTableTypes` response directly from the catalog (#7324)
|
* refactor(flightsql): Do not use plan for GetTableTypes
* test: Add an end to end test
| null |
refactor(flightsql): Build `GetTableTypes` response directly from the catalog (#7324)
* refactor(flightsql): Do not use plan for GetTableTypes
* test: Add an end to end test
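The mechanism is nothing more than a hand-built, single-column Arrow batch. The same construction as in the diff, shown standalone outside the planner for clarity (table_types_batch is an illustrative name):

use std::sync::Arc;

use arrow::array::{ArrayRef, StringArray};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;

/// Sketch: build the fixed GetTableTypes response without running any plan.
fn table_types_batch() -> RecordBatch {
    let schema = Arc::new(Schema::new(vec![Field::new(
        "table_type",
        DataType::Utf8,
        false,
    )]));
    // IOx exposes only these two types; LOCAL TEMPORARY is not supported.
    let table_type = Arc::new(StringArray::from_iter_values(["BASE TABLE", "VIEW"])) as ArrayRef;
    RecordBatch::try_new(schema, vec![table_type]).expect("schema matches the single column")
}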
|
diff --git a/flightsql/src/planner.rs b/flightsql/src/planner.rs
index e080abcf89..7d7448cbb3 100644
--- a/flightsql/src/planner.rs
+++ b/flightsql/src/planner.rs
@@ -2,7 +2,7 @@
use std::sync::Arc;
use arrow::{
- array::{as_string_array, ArrayRef, BinaryArray, GenericBinaryBuilder},
+ array::{as_string_array, ArrayRef, BinaryArray, GenericBinaryBuilder, StringArray},
datatypes::{DataType, Field, Schema, SchemaRef},
error::ArrowError,
ipc::writer::IpcWriteOptions,
@@ -558,16 +558,22 @@ static GET_TABLES_SCHEMA_WITH_TABLE_SCHEMA: Lazy<SchemaRef> = Lazy::new(|| {
});
/// Return a `LogicalPlan` for GetTableTypes
-///
-/// In the future this could be made more efficient by building the
-/// response directly from the IOx catalog rather than running an
-/// entire DataFusion plan.
async fn plan_get_table_types(ctx: &IOxSessionContext) -> Result<LogicalPlan> {
- let query = "SELECT DISTINCT table_type FROM information_schema.tables ORDER BY table_type";
-
- Ok(ctx.sql_to_logical_plan(query).await?)
+ Ok(ctx.batch_to_logical_plan(TABLE_TYPES_RECORD_BATCH.clone())?)
}
/// The schema for GetTableTypes
-static GET_TABLE_TYPE_SCHEMA: Lazy<Schema> =
- Lazy::new(|| Schema::new(vec![Field::new("table_type", DataType::Utf8, false)]));
+static GET_TABLE_TYPE_SCHEMA: Lazy<SchemaRef> = Lazy::new(|| {
+ Arc::new(Schema::new(vec![Field::new(
+ "table_type",
+ DataType::Utf8,
+ false,
+ )]))
+});
+
+static TABLE_TYPES_RECORD_BATCH: Lazy<RecordBatch> = Lazy::new(|| {
+ // https://github.com/apache/arrow-datafusion/blob/26b8377b0690916deacf401097d688699026b8fb/datafusion/core/src/catalog/information_schema.rs#L285-L287
+ // IOx doesn't support LOCAL TEMPORARY yet
+ let table_type = Arc::new(StringArray::from_iter_values(["BASE TABLE", "VIEW"])) as ArrayRef;
+ RecordBatch::try_new(Arc::clone(&GET_TABLE_TYPE_SCHEMA), vec![table_type]).unwrap()
+});
diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs
index 605bf11953..1762e04190 100644
--- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs
+++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs
@@ -488,6 +488,70 @@ async fn flightsql_get_table_types() {
.await
}
+#[tokio::test]
+async fn flightsql_get_table_types_matches_information_schema() {
+ test_helpers::maybe_start_logging();
+ let database_url = maybe_skip_integration!();
+
+ let table_name = "the_table";
+
+ // Set up the cluster ====================================
+ let mut cluster = MiniCluster::create_shared2(database_url).await;
+
+ StepTest::new(
+ &mut cluster,
+ vec![
+ Step::WriteLineProtocol(format!(
+ "{table_name},tag1=A,tag2=B val=42i 123456\n\
+ {table_name},tag1=A,tag2=C val=43i 123457"
+ )),
+ Step::Custom(Box::new(move |state: &mut StepTestState| {
+ async move {
+ let mut client = flightsql_client(state.cluster());
+
+ // output of get_table_types is built manually in
+ // IOx, so it is important it remains in sync with
+ // the actual contents of the information schema
+ fn no_filter() -> Option<String> {
+ None
+ }
+ let stream = client
+ .get_table_types()
+ .await
+ .unwrap();
+ let get_table_types_batches = collect_stream(stream).await;
+ let get_table_types_output = batches_to_sorted_lines(&get_table_types_batches);
+
+ let sql = "SELECT DISTINCT table_type FROM information_schema.tables ORDER BY table_type";
+
+ let stream = client.query(sql).await.unwrap();
+ let information_schema_batches = collect_stream(stream).await;
+ let information_schema_output =
+ batches_to_sorted_lines(&information_schema_batches);
+
+ insta::assert_yaml_snapshot!(
+ get_table_types_output,
+ @r###"
+ ---
+ - +------------+
+ - "| table_type |"
+ - +------------+
+ - "| BASE TABLE |"
+ - "| VIEW |"
+ - +------------+
+ "###
+ );
+
+ assert_eq!(get_table_types_output, information_schema_output);
+ }
+ .boxed()
+ })),
+ ],
+ )
+ .run()
+ .await
+}
+
#[tokio::test]
async fn flightsql_get_db_schemas() {
test_helpers::maybe_start_logging();
diff --git a/influxdb_iox_client/src/client/flightsql.rs b/influxdb_iox_client/src/client/flightsql.rs
index cf59aba68b..717c05872b 100644
--- a/influxdb_iox_client/src/client/flightsql.rs
+++ b/influxdb_iox_client/src/client/flightsql.rs
@@ -120,8 +120,13 @@ impl FlightSqlClient {
/// Step 2: Fetch the results described in the [`FlightInfo`]
///
/// This implementation does not support alternate endpoints
- pub async fn query(&mut self, query: String) -> Result<FlightRecordBatchStream> {
- let msg = CommandStatementQuery { query };
+ pub async fn query(
+ &mut self,
+ query: impl Into<String> + Send,
+ ) -> Result<FlightRecordBatchStream> {
+ let msg = CommandStatementQuery {
+ query: query.into(),
+ };
self.do_get_with_cmd(msg.as_any()).await
}
|
02794617386f30ea467a543adb72cdcacb25bf7e
|
Trevor Hilton
|
2024-07-16 10:10:47
|
hook up last cache to query executor using DataFusion traits (#25143)
|
* feat: impl datafusion traits on last cache
Created a new module for the DataFusion table function implementations.
The TableProvider impl for LastCache was moved there, and new code that
implements the TableFunctionImpl trait to make the last cache queryable
was also written.
The LastCacheProvider and LastCache were augmented to make this work:
- The provider stores an Arc<LastCache> instead of a LastCache
- The LastCache uses interior mutability via an RwLock, to make the above
possible.
* feat: register last_cache UDTF on query context
* refactor: make server accept listener instead of socket addr
The server used to accept a socket address and bind it directly, returning
an error if the bind fails.
This commit changes that so the ServerBuilder accepts a TcpListener. The
behaviour is essentially the same, but this allows us to bind the address
from tests when instantiating the server, so we can easily assign unused
ports.
Tests in the influxdb3_server were updated to exploit this in order to
use port 0 auto assignment and stop flaky test failures.
A new, failing, test was also added to that module for the last cache.
* refactor: naive implementation of last cache key columns
Committing here as the last cache is in a working state, but it is naively
implemented as it just stores all key columns again (still with the hierarchy)
* refactor: make the last cache work with the query executor
* chore: fix my own feedback and appease clippy
* refactor: remove lower lock in last cache
* chore: cargo update
* refactor: rename function
* fix: broken doc comment
| null |
feat: hook up last cache to query executor using DataFusion traits (#25143)
* feat: impl datafusion traits on last cache
Created a new module for the DataFusion table function implementations.
The TableProvider impl for LastCache was moved there, and new code that
implements the TableFunctionImpl trait to make the last cache queryable
was also written.
The LastCacheProvider and LastCache were augmented to make this work:
- The provider stores an Arc<LastCache> instead of a LastCache
- The LastCache uses interior mutability via an RwLock, to make the above
possible.
* feat: register last_cache UDTF on query context
* refactor: make server accept listener instead of socket addr
The server used to accept a socket address and bind it directly, returning
an error if the bind fails.
This commit changes that so the ServerBuilder accepts a TcpListener. The
behaviour is essentially the same, but this allows us to bind the address
from tests when instantiating the server, so we can easily assign unused
ports.
Tests in the influxdb3_server were updated to exploit this in order to
use port 0 auto assignment and stop flaky test failures.
A new, failing, test was also added to that module for the last cache.
* refactor: naive implementation of last cache key columns
Committing here as the last cache is in a working state, but it is naively
implemented as it just stores all key columns again (still with the hierarchy)
* refactor: make the last cache work with the query executor
* chore: fix my own feedback and appease clippy
* refactor: remove lower lock in last cache
* chore: cargo update
* refactor: rename function
* fix: broken doc comment
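The listener change is what removes the flaky ports: binding port 0 asks the OS for any free port, and the test reads the assigned address back before handing the listener to the server builder. A sketch of that pattern follows (only the plain-tokio binding step is shown; the ServerBuilder API itself is not, and bind_ephemeral is an illustrative name):

use std::net::SocketAddr;

use tokio::net::TcpListener;

/// Sketch: bind an ephemeral port for a test server and discover the address
/// the OS picked, so parallel tests never collide on a hard-coded port.
async fn bind_ephemeral() -> std::io::Result<(TcpListener, SocketAddr)> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let addr = listener.local_addr()?;
    Ok((listener, addr))
}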
|
diff --git a/Cargo.lock b/Cargo.lock
index 6f820f89a3..2c32bebc98 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -469,7 +469,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -480,7 +480,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -653,9 +653,9 @@ dependencies = [
[[package]]
name = "blake3"
-version = "1.5.1"
+version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52"
+checksum = "e9ec96fe9a81b5e365f9db71fe00edc4fe4ca2cc7dcb7861f0603012a7caa210"
dependencies = [
"arrayref",
"arrayvec",
@@ -719,9 +719,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
-version = "1.6.0"
+version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952"
[[package]]
name = "bzip2"
@@ -766,13 +766,12 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.1.0"
+version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eaff6f8ce506b9773fa786672d63fc7a191ffea1be33f72bbd4aeacefca9ffc8"
+checksum = "324c74f2155653c90b04f25b2a47a8a631360cb908f92a772695f430c7e31052"
dependencies = [
"jobserver",
"libc",
- "once_cell",
]
[[package]]
@@ -887,7 +886,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -1178,7 +1177,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -1202,7 +1201,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -1213,7 +1212,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
"darling_core",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -1900,7 +1899,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -2164,9 +2163,9 @@ dependencies = [
[[package]]
name = "http-body"
-version = "1.0.0"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
dependencies = [
"bytes",
"http 1.1.0",
@@ -2181,7 +2180,7 @@ dependencies = [
"bytes",
"futures-util",
"http 1.1.0",
- "http-body 1.0.0",
+ "http-body 1.0.1",
"pin-project-lite",
]
@@ -2238,7 +2237,7 @@ dependencies = [
"futures-util",
"h2 0.4.5",
"http 1.1.0",
- "http-body 1.0.0",
+ "http-body 1.0.1",
"httparse",
"itoa",
"pin-project-lite",
@@ -2301,7 +2300,7 @@ dependencies = [
"futures-channel",
"futures-util",
"http 1.1.0",
- "http-body 1.0.0",
+ "http-body 1.0.1",
"hyper 1.4.1",
"pin-project-lite",
"socket2",
@@ -3529,7 +3528,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
- "redox_syscall 0.5.2",
+ "redox_syscall 0.5.3",
"smallvec",
"windows-targets 0.52.6",
]
@@ -3731,7 +3730,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -3847,7 +3846,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
dependencies = [
"proc-macro2",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -3942,7 +3941,7 @@ dependencies = [
"prost 0.12.6",
"prost-types 0.12.6",
"regex",
- "syn 2.0.70",
+ "syn 2.0.71",
"tempfile",
]
@@ -3969,7 +3968,7 @@ dependencies = [
"itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -4148,9 +4147,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
-version = "0.5.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd"
+checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
dependencies = [
"bitflags 2.6.0",
]
@@ -4257,7 +4256,7 @@ dependencies = [
"futures-util",
"h2 0.4.5",
"http 1.1.0",
- "http-body 1.0.0",
+ "http-body 1.0.1",
"http-body-util",
"hyper 1.4.1",
"hyper-rustls 0.27.2",
@@ -4553,9 +4552,9 @@ dependencies = [
[[package]]
name = "security-framework"
-version = "2.11.0"
+version = "2.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0"
+checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [
"bitflags 2.6.0",
"core-foundation",
@@ -4566,9 +4565,9 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.11.0"
+version = "2.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7"
+checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf"
dependencies = [
"core-foundation-sys",
"libc",
@@ -4603,7 +4602,7 @@ checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -4631,9 +4630,9 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "3.8.3"
+version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377"
+checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857"
dependencies = [
"base64 0.22.1",
"chrono",
@@ -4649,14 +4648,14 @@ dependencies = [
[[package]]
name = "serde_with_macros"
-version = "3.8.3"
+version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703"
+checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350"
dependencies = [
"darling",
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -4826,7 +4825,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -4892,7 +4891,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -5149,7 +5148,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -5171,9 +5170,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.70"
+version = "2.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16"
+checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462"
dependencies = [
"proc-macro2",
"quote",
@@ -5268,22 +5267,22 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "1.0.61"
+version = "1.0.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
+checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.61"
+version = "1.0.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
+checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -5442,7 +5441,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -5593,7 +5592,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -5726,7 +5725,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -6042,7 +6041,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
"wasm-bindgen-shared",
]
@@ -6076,7 +6075,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -6428,7 +6427,7 @@ dependencies = [
"strum",
"subtle",
"syn 1.0.109",
- "syn 2.0.70",
+ "syn 2.0.71",
"thrift",
"tokio",
"tokio-stream",
@@ -6482,7 +6481,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
@@ -6502,7 +6501,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.70",
+ "syn 2.0.71",
]
[[package]]
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs
index fcceada514..436372f088 100644
--- a/influxdb3/src/commands/serve.rs
+++ b/influxdb3/src/commands/serve.rs
@@ -31,6 +31,7 @@ use std::{
sync::Arc,
};
use thiserror::Error;
+use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
use trace_exporters::TracingConfig;
use trace_http::ctx::TraceHeaderParser;
@@ -54,6 +55,9 @@ pub enum Error {
#[error("Error initializing tokio runtime: {0}")]
TokioRuntime(#[source] std::io::Error),
+ #[error("Failed to bind address")]
+ BindAddress(#[source] std::io::Error),
+
#[error("Server error: {0}")]
Server(#[from] influxdb3_server::Error),
@@ -266,12 +270,8 @@ pub async fn command(config: Config) -> Result<()> {
)
.with_jaeger_debug_name(config.tracing_config.traces_jaeger_debug_name);
- let common_state = CommonServerState::new(
- Arc::clone(&metrics),
- trace_exporter,
- trace_header_parser,
- *config.http_bind_address,
- )?;
+ let common_state =
+ CommonServerState::new(Arc::clone(&metrics), trace_exporter, trace_header_parser)?;
let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
let wal: Option<Arc<WalImpl>> = config
.wal_directory
@@ -300,12 +300,17 @@ pub async fn command(config: Config) -> Result<()> {
config.query_log_size,
));
+ let listener = TcpListener::bind(*config.http_bind_address)
+ .await
+ .map_err(Error::BindAddress)?;
+
let builder = ServerBuilder::new(common_state)
.max_request_size(config.max_http_request_size)
.write_buffer(write_buffer)
.query_executor(query_executor)
.time_provider(time_provider)
- .persister(persister);
+ .persister(persister)
+ .tcp_listener(listener);
let server = if let Some(token) = config.bearer_token.map(hex::decode).transpose()? {
builder
diff --git a/influxdb3_server/src/builder.rs b/influxdb3_server/src/builder.rs
index d7000c4944..7a44648f07 100644
--- a/influxdb3_server/src/builder.rs
+++ b/influxdb3_server/src/builder.rs
@@ -1,21 +1,23 @@
use std::sync::Arc;
use authz::Authorizer;
+use tokio::net::TcpListener;
use crate::{auth::DefaultAuthorizer, http::HttpApi, CommonServerState, Server};
#[derive(Debug)]
-pub struct ServerBuilder<W, Q, P, T> {
+pub struct ServerBuilder<W, Q, P, T, L> {
common_state: CommonServerState,
time_provider: T,
max_request_size: usize,
write_buffer: W,
query_executor: Q,
persister: P,
+ listener: L,
authorizer: Arc<dyn Authorizer>,
}
-impl ServerBuilder<NoWriteBuf, NoQueryExec, NoPersister, NoTimeProvider> {
+impl ServerBuilder<NoWriteBuf, NoQueryExec, NoPersister, NoTimeProvider, NoListener> {
pub fn new(common_state: CommonServerState) -> Self {
Self {
common_state,
@@ -24,12 +26,13 @@ impl ServerBuilder<NoWriteBuf, NoQueryExec, NoPersister, NoTimeProvider> {
write_buffer: NoWriteBuf,
query_executor: NoQueryExec,
persister: NoPersister,
+ listener: NoListener,
authorizer: Arc::new(DefaultAuthorizer),
}
}
}
-impl<W, Q, P, T> ServerBuilder<W, Q, P, T> {
+impl<W, Q, P, T, L> ServerBuilder<W, Q, P, T, L> {
pub fn max_request_size(mut self, max_request_size: usize) -> Self {
self.max_request_size = max_request_size;
self
@@ -57,9 +60,13 @@ pub struct WithPersister<P>(Arc<P>);
pub struct NoTimeProvider;
#[derive(Debug)]
pub struct WithTimeProvider<T>(Arc<T>);
+#[derive(Debug)]
+pub struct NoListener;
+#[derive(Debug)]
+pub struct WithListener(TcpListener);
-impl<Q, P, T> ServerBuilder<NoWriteBuf, Q, P, T> {
- pub fn write_buffer<W>(self, wb: Arc<W>) -> ServerBuilder<WithWriteBuf<W>, Q, P, T> {
+impl<Q, P, T, L> ServerBuilder<NoWriteBuf, Q, P, T, L> {
+ pub fn write_buffer<W>(self, wb: Arc<W>) -> ServerBuilder<WithWriteBuf<W>, Q, P, T, L> {
ServerBuilder {
common_state: self.common_state,
time_provider: self.time_provider,
@@ -67,13 +74,14 @@ impl<Q, P, T> ServerBuilder<NoWriteBuf, Q, P, T> {
write_buffer: WithWriteBuf(wb),
query_executor: self.query_executor,
persister: self.persister,
+ listener: self.listener,
authorizer: self.authorizer,
}
}
}
-impl<W, P, T> ServerBuilder<W, NoQueryExec, P, T> {
- pub fn query_executor<Q>(self, qe: Arc<Q>) -> ServerBuilder<W, WithQueryExec<Q>, P, T> {
+impl<W, P, T, L> ServerBuilder<W, NoQueryExec, P, T, L> {
+ pub fn query_executor<Q>(self, qe: Arc<Q>) -> ServerBuilder<W, WithQueryExec<Q>, P, T, L> {
ServerBuilder {
common_state: self.common_state,
time_provider: self.time_provider,
@@ -81,13 +89,14 @@ impl<W, P, T> ServerBuilder<W, NoQueryExec, P, T> {
write_buffer: self.write_buffer,
query_executor: WithQueryExec(qe),
persister: self.persister,
+ listener: self.listener,
authorizer: self.authorizer,
}
}
}
-impl<W, Q, T> ServerBuilder<W, Q, NoPersister, T> {
- pub fn persister<P>(self, p: Arc<P>) -> ServerBuilder<W, Q, WithPersister<P>, T> {
+impl<W, Q, T, L> ServerBuilder<W, Q, NoPersister, T, L> {
+ pub fn persister<P>(self, p: Arc<P>) -> ServerBuilder<W, Q, WithPersister<P>, T, L> {
ServerBuilder {
common_state: self.common_state,
time_provider: self.time_provider,
@@ -95,13 +104,14 @@ impl<W, Q, T> ServerBuilder<W, Q, NoPersister, T> {
write_buffer: self.write_buffer,
query_executor: self.query_executor,
persister: WithPersister(p),
+ listener: self.listener,
authorizer: self.authorizer,
}
}
}
-impl<W, Q, P> ServerBuilder<W, Q, P, NoTimeProvider> {
- pub fn time_provider<T>(self, tp: Arc<T>) -> ServerBuilder<W, Q, P, WithTimeProvider<T>> {
+impl<W, Q, P, L> ServerBuilder<W, Q, P, NoTimeProvider, L> {
+ pub fn time_provider<T>(self, tp: Arc<T>) -> ServerBuilder<W, Q, P, WithTimeProvider<T>, L> {
ServerBuilder {
common_state: self.common_state,
time_provider: WithTimeProvider(tp),
@@ -109,13 +119,35 @@ impl<W, Q, P> ServerBuilder<W, Q, P, NoTimeProvider> {
write_buffer: self.write_buffer,
query_executor: self.query_executor,
persister: self.persister,
+ listener: self.listener,
+ authorizer: self.authorizer,
+ }
+ }
+}
+
+impl<W, Q, P, T> ServerBuilder<W, Q, P, T, NoListener> {
+ pub fn tcp_listener(self, listener: TcpListener) -> ServerBuilder<W, Q, P, T, WithListener> {
+ ServerBuilder {
+ common_state: self.common_state,
+ time_provider: self.time_provider,
+ max_request_size: self.max_request_size,
+ write_buffer: self.write_buffer,
+ query_executor: self.query_executor,
+ persister: self.persister,
+ listener: WithListener(listener),
authorizer: self.authorizer,
}
}
}
impl<W, Q, P, T>
- ServerBuilder<WithWriteBuf<W>, WithQueryExec<Q>, WithPersister<P>, WithTimeProvider<T>>
+ ServerBuilder<
+ WithWriteBuf<W>,
+ WithQueryExec<Q>,
+ WithPersister<P>,
+ WithTimeProvider<T>,
+ WithListener,
+ >
{
pub fn build(self) -> Server<W, Q, P, T> {
let persister = Arc::clone(&self.persister.0);
@@ -133,6 +165,7 @@ impl<W, Q, P, T>
http,
persister,
authorizer,
+ listener: self.listener.0,
}
}
}
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs
index ddcf6e62b4..41077a3956 100644
--- a/influxdb3_server/src/lib.rs
+++ b/influxdb3_server/src/lib.rs
@@ -24,6 +24,8 @@ use crate::http::HttpApi;
use async_trait::async_trait;
use authz::Authorizer;
use datafusion::execution::SendableRecordBatchStream;
+use hyper::server::conn::AddrIncoming;
+use hyper::server::conn::Http;
use hyper::service::service_fn;
use influxdb3_write::{Persister, WriteBuffer};
use iox_query::QueryDatabase;
@@ -33,9 +35,9 @@ use observability_deps::tracing::error;
use service::hybrid;
use std::convert::Infallible;
use std::fmt::Debug;
-use std::net::SocketAddr;
use std::sync::Arc;
use thiserror::Error;
+use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
use tower::Layer;
use trace::ctx::SpanContext;
@@ -76,7 +78,6 @@ pub struct CommonServerState {
metrics: Arc<metric::Registry>,
trace_exporter: Option<Arc<trace_exporters::export::AsyncExporter>>,
trace_header_parser: TraceHeaderParser,
- http_addr: SocketAddr,
}
impl CommonServerState {
@@ -84,13 +85,11 @@ impl CommonServerState {
metrics: Arc<metric::Registry>,
trace_exporter: Option<Arc<trace_exporters::export::AsyncExporter>>,
trace_header_parser: TraceHeaderParser,
- http_addr: SocketAddr,
) -> Result<Self> {
Ok(Self {
metrics,
trace_exporter,
trace_header_parser,
- http_addr,
})
}
@@ -120,6 +119,7 @@ pub struct Server<W, Q, P, T> {
http: Arc<HttpApi<W, Q, T>>,
persister: Arc<P>,
authorizer: Arc<dyn Authorizer>,
+ listener: TcpListener,
}
#[async_trait]
@@ -193,7 +193,8 @@ where
let hybrid_make_service = hybrid(rest_service, grpc_service);
- hyper::Server::bind(&server.common_state.http_addr)
+ let addr = AddrIncoming::from_listener(server.listener)?;
+ hyper::server::Builder::new(addr, Http::new())
.serve(hybrid_make_service)
.with_graceful_shutdown(shutdown.cancelled())
.await?;
@@ -231,6 +232,8 @@ mod tests {
use datafusion::parquet::data_type::AsBytes;
use hyper::{body, Body, Client, Request, Response, StatusCode};
use influxdb3_write::persister::PersisterImpl;
+ use influxdb3_write::wal::WalImpl;
+ use influxdb3_write::write_buffer::WriteBufferImpl;
use influxdb3_write::SegmentDuration;
use iox_query::exec::{DedicatedExecutor, Executor, ExecutorConfig};
use iox_time::{MockProvider, Time};
@@ -238,75 +241,17 @@ mod tests {
use parquet_file::storage::{ParquetStorage, StorageId};
use pretty_assertions::assert_eq;
use std::collections::HashMap;
- use std::net::{SocketAddr, SocketAddrV4};
+ use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::num::NonZeroUsize;
- use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::Arc;
+ use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
- static NEXT_PORT: AtomicU16 = AtomicU16::new(8090);
-
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn write_and_query() {
- let addr = get_free_port();
- let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
- let metrics = Arc::new(metric::Registry::new());
- let common_state =
- crate::CommonServerState::new(Arc::clone(&metrics), None, trace_header_parser, addr)
- .unwrap();
- let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
- let parquet_store =
- ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3"));
- let exec = Arc::new(Executor::new_with_config_and_executor(
- ExecutorConfig {
- target_query_partitions: NonZeroUsize::new(1).unwrap(),
- object_stores: [&parquet_store]
- .into_iter()
- .map(|store| (store.id(), Arc::clone(store.object_store())))
- .collect(),
- metric_registry: Arc::clone(&metrics),
- mem_pool_size: usize::MAX,
- },
- DedicatedExecutor::new_testing(),
- ));
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
- let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
+ let start_time = 0;
+ let (server, shutdown, _) = setup_server(start_time).await;
- let write_buffer = Arc::new(
- influxdb3_write::write_buffer::WriteBufferImpl::new(
- Arc::clone(&persister),
- None::<Arc<influxdb3_write::wal::WalImpl>>,
- Arc::clone(&time_provider),
- SegmentDuration::new_5m(),
- Arc::clone(&exec),
- 10000,
- )
- .await
- .unwrap(),
- );
- let query_executor = Arc::new(crate::query_executor::QueryExecutorImpl::new(
- write_buffer.catalog(),
- Arc::clone(&write_buffer),
- Arc::clone(&exec),
- Arc::clone(&metrics),
- Arc::new(HashMap::new()),
- 10,
- 10,
- ));
-
- let server = ServerBuilder::new(common_state)
- .write_buffer(Arc::clone(&write_buffer))
- .query_executor(Arc::clone(&query_executor))
- .persister(Arc::clone(&persister))
- .authorizer(Arc::new(DefaultAuthorizer))
- .time_provider(Arc::clone(&time_provider))
- .build();
- let frontend_shutdown = CancellationToken::new();
- let shutdown = frontend_shutdown.clone();
-
- tokio::spawn(async move { serve(server, frontend_shutdown).await });
-
- let server = format!("http://{}", addr);
write_lp(
&server,
"foo",
@@ -409,66 +354,9 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn write_lp_tests() {
- let addr = get_free_port();
- let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
- let metrics = Arc::new(metric::Registry::new());
- let common_state =
- crate::CommonServerState::new(Arc::clone(&metrics), None, trace_header_parser, addr)
- .unwrap();
- let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
- let parquet_store =
- ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3"));
- let exec = Arc::new(Executor::new_with_config_and_executor(
- ExecutorConfig {
- target_query_partitions: NonZeroUsize::new(1).unwrap(),
- object_stores: [&parquet_store]
- .into_iter()
- .map(|store| (store.id(), Arc::clone(store.object_store())))
- .collect(),
- metric_registry: Arc::clone(&metrics),
- mem_pool_size: usize::MAX,
- },
- DedicatedExecutor::new_testing(),
- ));
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
- let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
-
- let write_buffer = Arc::new(
- influxdb3_write::write_buffer::WriteBufferImpl::new(
- Arc::clone(&persister),
- None::<Arc<influxdb3_write::wal::WalImpl>>,
- Arc::clone(&time_provider),
- SegmentDuration::new_5m(),
- Arc::clone(&exec),
- 10000,
- )
- .await
- .unwrap(),
- );
- let query_executor = crate::query_executor::QueryExecutorImpl::new(
- write_buffer.catalog(),
- Arc::clone(&write_buffer),
- Arc::clone(&exec),
- Arc::clone(&metrics),
- Arc::new(HashMap::new()),
- 10,
- 10,
- );
-
- let server = ServerBuilder::new(common_state)
- .write_buffer(Arc::clone(&write_buffer))
- .query_executor(Arc::new(query_executor))
- .persister(persister)
- .authorizer(Arc::new(DefaultAuthorizer))
- .time_provider(Arc::clone(&time_provider))
- .build();
- let frontend_shutdown = CancellationToken::new();
- let shutdown = frontend_shutdown.clone();
+ let start_time = 0;
+ let (server, shutdown, _) = setup_server(start_time).await;
- tokio::spawn(async move { serve(server, frontend_shutdown).await });
-
- // Test that only one error comes back
- let server = format!("http://{}", addr);
let resp = write_lp(
&server,
"foo",
@@ -615,67 +503,9 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn write_lp_precision_tests() {
- let addr = get_free_port();
- let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
- let metrics = Arc::new(metric::Registry::new());
- let common_state =
- crate::CommonServerState::new(Arc::clone(&metrics), None, trace_header_parser, addr)
- .unwrap();
- let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
- let parquet_store =
- ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3"));
- let exec = Arc::new(Executor::new_with_config_and_executor(
- ExecutorConfig {
- target_query_partitions: NonZeroUsize::new(1).unwrap(),
- object_stores: [&parquet_store]
- .into_iter()
- .map(|store| (store.id(), Arc::clone(store.object_store())))
- .collect(),
- metric_registry: Arc::clone(&metrics),
- mem_pool_size: usize::MAX,
- },
- DedicatedExecutor::new_testing(),
- ));
- let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
- let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(
- 1708473607000000000,
- )));
-
- let write_buffer = Arc::new(
- influxdb3_write::write_buffer::WriteBufferImpl::new(
- Arc::clone(&persister),
- None::<Arc<influxdb3_write::wal::WalImpl>>,
- Arc::clone(&time_provider),
- SegmentDuration::new_5m(),
- Arc::clone(&exec),
- 10000,
- )
- .await
- .unwrap(),
- );
- let query_executor = crate::query_executor::QueryExecutorImpl::new(
- write_buffer.catalog(),
- Arc::clone(&write_buffer),
- Arc::clone(&exec),
- Arc::clone(&metrics),
- Arc::new(HashMap::new()),
- 10,
- 10,
- );
-
- let server = ServerBuilder::new(common_state)
- .write_buffer(Arc::clone(&write_buffer))
- .query_executor(Arc::new(query_executor))
- .persister(persister)
- .authorizer(Arc::new(DefaultAuthorizer))
- .time_provider(Arc::clone(&time_provider))
- .build();
- let frontend_shutdown = CancellationToken::new();
- let shutdown = frontend_shutdown.clone();
-
- tokio::spawn(async move { serve(server, frontend_shutdown).await });
+ let start_time = 1708473607000000000;
+ let (server, shutdown, _) = setup_server(start_time).await;
- let server = format!("http://{}", addr);
let resp = write_lp(
&server,
"foo",
@@ -789,6 +619,153 @@ mod tests {
shutdown.cancel();
}
+ #[tokio::test]
+ async fn query_from_last_cache() {
+ let start_time = 0;
+ let (url, shutdown, wbuf) = setup_server(start_time).await;
+ let db_name = "foo";
+ let tbl_name = "cpu";
+
+ // Write to generate a db/table in the catalog:
+ let resp = write_lp(
+ &url,
+ db_name,
+ format!("{tbl_name},region=us,host=a usage=50 500"),
+ None,
+ false,
+ "second",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+
+ // Create the last cache:
+ wbuf.create_last_cache(db_name, tbl_name, None, None, None, None, None)
+ .expect("create last cache");
+
+ // Write to put something in the last cache:
+ let resp = write_lp(
+ &url,
+ db_name,
+ format!(
+ "\
+ {tbl_name},region=us,host=a usage=11 1000\n\
+ {tbl_name},region=us,host=b usage=22 1000\n\
+ {tbl_name},region=us,host=c usage=33 1000\n\
+ {tbl_name},region=ca,host=d usage=44 1000\n\
+ {tbl_name},region=ca,host=e usage=55 1000\n\
+ {tbl_name},region=eu,host=f usage=66 1000\n\
+ "
+ ),
+ None,
+ false,
+ "second",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+
+ // Query from the last cache:
+ let res = query(
+ &url,
+ db_name,
+ format!("SELECT * FROM last_cache('{tbl_name}') ORDER BY host"),
+ "pretty",
+ None,
+ )
+ .await;
+ let body = body::to_bytes(res.into_body()).await.unwrap();
+ let body = String::from_utf8(body.as_bytes().to_vec()).unwrap();
+ assert_eq!(
+ "\
+ +------+--------+---------------------+-------+\n\
+ | host | region | time | usage |\n\
+ +------+--------+---------------------+-------+\n\
+ | a | us | 1970-01-01T00:16:40 | 11.0 |\n\
+ | b | us | 1970-01-01T00:16:40 | 22.0 |\n\
+ | c | us | 1970-01-01T00:16:40 | 33.0 |\n\
+ | d | ca | 1970-01-01T00:16:40 | 44.0 |\n\
+ | e | ca | 1970-01-01T00:16:40 | 55.0 |\n\
+ | f | eu | 1970-01-01T00:16:40 | 66.0 |\n\
+ +------+--------+---------------------+-------+",
+ body
+ );
+
+ shutdown.cancel();
+ }
+
+ async fn setup_server(
+ start_time: i64,
+ ) -> (
+ String,
+ CancellationToken,
+ Arc<WriteBufferImpl<WalImpl, MockProvider>>,
+ ) {
+ let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
+ let metrics = Arc::new(metric::Registry::new());
+ let common_state =
+ crate::CommonServerState::new(Arc::clone(&metrics), None, trace_header_parser).unwrap();
+ let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
+ let parquet_store =
+ ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3"));
+ let exec = Arc::new(Executor::new_with_config_and_executor(
+ ExecutorConfig {
+ target_query_partitions: NonZeroUsize::new(1).unwrap(),
+ object_stores: [&parquet_store]
+ .into_iter()
+ .map(|store| (store.id(), Arc::clone(store.object_store())))
+ .collect(),
+ metric_registry: Arc::clone(&metrics),
+ mem_pool_size: usize::MAX,
+ },
+ DedicatedExecutor::new_testing(),
+ ));
+ let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
+ let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(start_time)));
+
+ let write_buffer = Arc::new(
+ influxdb3_write::write_buffer::WriteBufferImpl::new(
+ Arc::clone(&persister),
+ None::<Arc<influxdb3_write::wal::WalImpl>>,
+ Arc::clone(&time_provider),
+ SegmentDuration::new_5m(),
+ Arc::clone(&exec),
+ 10000,
+ )
+ .await
+ .unwrap(),
+ );
+ let query_executor = crate::query_executor::QueryExecutorImpl::new(
+ write_buffer.catalog(),
+ Arc::clone(&write_buffer),
+ Arc::clone(&exec),
+ Arc::clone(&metrics),
+ Arc::new(HashMap::new()),
+ 10,
+ 10,
+ );
+
+ // bind to port 0 will assign a random available port:
+ let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
+ let listener = TcpListener::bind(socket_addr)
+ .await
+ .expect("bind tcp address");
+ let addr = listener.local_addr().unwrap();
+
+ let server = ServerBuilder::new(common_state)
+ .write_buffer(Arc::clone(&write_buffer))
+ .query_executor(Arc::new(query_executor))
+ .persister(persister)
+ .authorizer(Arc::new(DefaultAuthorizer))
+ .time_provider(Arc::clone(&time_provider))
+ .tcp_listener(listener)
+ .build();
+ let frontend_shutdown = CancellationToken::new();
+ let shutdown = frontend_shutdown.clone();
+
+ tokio::spawn(async move { serve(server, frontend_shutdown).await });
+
+ (format!("http://{addr}"), shutdown, write_buffer)
+ }
+
pub(crate) async fn write_lp(
server: impl Into<String> + Send,
database: impl Into<String> + Send,
@@ -853,17 +830,4 @@ mod tests {
.await
.expect("http error sending query")
}
-
- pub(crate) fn get_free_port() -> SocketAddr {
- let ip = std::net::Ipv4Addr::new(127, 0, 0, 1);
-
- loop {
- let port = NEXT_PORT.fetch_add(1, Ordering::SeqCst);
- let addr = SocketAddrV4::new(ip, port);
-
- if std::net::TcpListener::bind(addr).is_ok() {
- return addr.into();
- }
- }
- }
}
diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs
index f3ac594b15..c88315cc55 100644
--- a/influxdb3_server/src/query_executor.rs
+++ b/influxdb3_server/src/query_executor.rs
@@ -22,6 +22,7 @@ use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::Expr;
use datafusion_util::config::DEFAULT_SCHEMA;
use datafusion_util::MemoryStream;
+use influxdb3_write::last_cache::LastCacheFunction;
use influxdb3_write::{
catalog::{Catalog, DatabaseSchema},
WriteBuffer,
@@ -443,10 +444,20 @@ impl<B: WriteBuffer> QueryNamespace for Database<B> {
cfg = cfg.with_config_option(k, v);
}
- cfg.build()
+ let ctx = cfg.build();
+ ctx.inner().register_udtf(
+ LAST_CACHE_UDTF_NAME,
+ Arc::new(LastCacheFunction::new(
+ &self.db_schema.name,
+ self.write_buffer.last_cache(),
+ )),
+ );
+ ctx
}
}
+const LAST_CACHE_UDTF_NAME: &str = "last_cache";
+
impl<B: WriteBuffer> CatalogProvider for Database<B> {
fn as_any(&self) -> &dyn Any {
self as &dyn Any
diff --git a/influxdb3_write/src/last_cache.rs b/influxdb3_write/src/last_cache/mod.rs
similarity index 83%
rename from influxdb3_write/src/last_cache.rs
rename to influxdb3_write/src/last_cache/mod.rs
index d2a717f577..a0c563f379 100644
--- a/influxdb3_write/src/last_cache.rs
+++ b/influxdb3_write/src/last_cache/mod.rs
@@ -1,5 +1,4 @@
use std::{
- any::Any,
collections::VecDeque,
sync::Arc,
time::{Duration, Instant},
@@ -17,26 +16,24 @@ use arrow::{
},
error::ArrowError,
};
-use async_trait::async_trait;
use datafusion::{
- common::Result as DFResult,
- datasource::{TableProvider, TableType},
- execution::context::SessionState,
- logical_expr::{BinaryExpr, Expr, Operator, TableProviderFilterPushDown},
- physical_plan::{memory::MemoryExec, ExecutionPlan},
+ logical_expr::{BinaryExpr, Expr, Operator},
scalar::ScalarValue,
};
use hashbrown::{HashMap, HashSet};
use indexmap::{IndexMap, IndexSet};
use iox_time::Time;
use parking_lot::RwLock;
-use schema::{InfluxColumnType, InfluxFieldType, Schema, SchemaBuilder, TIME_COLUMN_NAME};
+use schema::{InfluxColumnType, InfluxFieldType, Schema, TIME_COLUMN_NAME};
use crate::{
catalog::LastCacheSize,
write_buffer::{buffer_segment::WriteBatch, Field, FieldData, Row},
};
+mod table_function;
+pub use table_function::LastCacheFunction;
+
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("invalid cache size")]
@@ -51,6 +48,8 @@ pub enum Error {
ValueColumnDoesNotExist { column_name: String },
#[error("schema builder error: {0}")]
SchemaBuilder(#[from] schema::builder::Error),
+ #[error("requested last cache does not exist")]
+ CacheDoesNotExist,
}
impl Error {
@@ -118,6 +117,34 @@ impl LastCacheProvider {
}
}
+ /// Get a particular cache's name and arrow schema
+ ///
+ /// This is used for the implementation of DataFusion's `TableFunctionImpl` and `TableProvider`
+ /// traits.
+ fn get_cache_name_and_schema(
+ &self,
+ db_name: &str,
+ tbl_name: &str,
+ cache_name: Option<&str>,
+ ) -> Option<(String, ArrowSchemaRef)> {
+ self.cache_map
+ .read()
+ .get(db_name)
+ .and_then(|db| db.get(tbl_name))
+ .and_then(|tbl| {
+ if let Some(name) = cache_name {
+ tbl.get(name)
+ .map(|lc| (name.to_string(), Arc::clone(&lc.schema)))
+ } else if tbl.len() == 1 {
+ tbl.iter()
+ .map(|(name, lc)| (name.to_string(), Arc::clone(&lc.schema)))
+ .next()
+ } else {
+ None
+ }
+ })
+ }
+
/// Create a new entry in the last cache for a given database and table, along with the given
/// parameters.
pub(crate) fn create_cache(
@@ -201,14 +228,26 @@ impl LastCacheProvider {
)
};
- // build a schema that only holds the field columns
- let mut schema_builder = SchemaBuilder::new();
- for (t, name) in schema
+ let mut schema_builder = ArrowSchemaBuilder::new();
+ // Add key columns first:
+ for (t, field) in schema
+ .iter()
+ .filter(|&(_, f)| key_columns.contains(f.name()))
+ {
+ if let InfluxColumnType::Tag = t {
+ // override tags with string type in the schema, because the KeyValue type stores
+ // them as strings, and produces them as StringArray when creating RecordBatches:
+ schema_builder.push(ArrowField::new(field.name(), DataType::Utf8, false))
+ } else {
+ schema_builder.push(field.clone());
+ };
+ }
+ // Add value columns second:
+ for (_, field) in schema
.iter()
.filter(|&(_, f)| value_columns.contains(f.name()))
- .map(|(t, f)| (t, f.name()))
{
- schema_builder.influx_column(name, t);
+ schema_builder.push(field.clone());
}
let series_key = schema
@@ -223,7 +262,7 @@ impl LastCacheProvider {
.map_err(|_| Error::InvalidCacheSize)?,
ttl.unwrap_or(DEFAULT_CACHE_TTL),
key_columns,
- schema_builder.build()?.as_arrow(),
+ Arc::new(schema_builder.finish()),
series_key,
accept_new_fields,
);
@@ -340,9 +379,7 @@ pub(crate) struct LastCache {
/// The key columns for this cache
///
/// Uses an [`IndexSet`] for both fast iteration and fast lookup.
- key_columns: IndexSet<String>,
- /// The Arrow Schema for the table that this cache is associated with
- schema: ArrowSchemaRef,
+ key_columns: Arc<IndexSet<String>>,
/// Optionally store the series key for tables that use it for ensuring non-nullability in the
/// column buffer for series key columns
///
@@ -351,6 +388,8 @@ pub(crate) struct LastCache {
series_key: Option<HashSet<String>>,
/// Whether or not this cache accepts newly written fields
accept_new_fields: bool,
+ /// The Arrow Schema for the table that this cache is associated with
+ schema: ArrowSchemaRef,
/// The internal state of the cache
state: LastCacheState,
}
@@ -368,10 +407,10 @@ impl LastCache {
Self {
count,
ttl,
- key_columns: key_columns.into_iter().collect(),
- schema,
+ key_columns: Arc::new(key_columns.into_iter().collect()),
series_key,
accept_new_fields,
+ schema,
state: LastCacheState::Init,
}
}
@@ -418,6 +457,7 @@ impl LastCache {
/// This will panic if the internal cache state's keys are out-of-order with respect to the
/// order of the `key_columns` on this [`LastCache`]
pub(crate) fn push(&mut self, row: &Row) {
+ let schema = Arc::clone(&self.schema);
let mut target = &mut self.state;
let mut key_iter = self.key_columns.iter().peekable();
while let (Some(key), peek) = (key_iter.next(), key_iter.peek()) {
@@ -451,7 +491,8 @@ impl LastCache {
LastCacheState::Store(LastCacheStore::new(
self.count.into(),
self.ttl,
- Arc::clone(&self.schema),
+ Arc::clone(&schema),
+ Arc::clone(&self.key_columns),
self.series_key.as_ref(),
))
}
@@ -462,14 +503,15 @@ impl LastCache {
*target = LastCacheState::Store(LastCacheStore::new(
self.count.into(),
self.ttl,
- Arc::clone(&self.schema),
+ Arc::clone(&schema),
+ Arc::clone(&self.key_columns),
self.series_key.as_ref(),
));
}
let store = target.as_store_mut().expect(
"cache target should be the actual store after iterating through all key columns",
);
- let Some(new_columns) = store.push(row, self.accept_new_fields, &self.key_columns) else {
+ let Some(new_columns) = store.push(row, self.accept_new_fields) else {
// Unless new columns were added, and we need to update the schema, we are done.
return;
};
@@ -490,10 +532,10 @@ impl LastCache {
fn to_record_batches(&self, predicates: &[Predicate]) -> Result<Vec<RecordBatch>, ArrowError> {
// map the provided predicates on to the key columns
// there may not be predicates provided for each key column, hence the Option
- let predicates: Vec<Option<Predicate>> = self
+ let predicates: Vec<Option<&Predicate>> = self
.key_columns
.iter()
- .map(|key| predicates.iter().find(|p| p.key == *key).cloned())
+ .map(|key| predicates.iter().find(|p| p.key == *key))
.collect();
let mut caches = vec![ExtendedLastCacheState {
@@ -508,13 +550,15 @@ impl LastCache {
let mut new_caches = vec![];
'cache_loop: for c in caches {
let cache_key = c.state.as_key().unwrap();
- if let Some(ref pred) = predicate {
+ if let Some(pred) = predicate {
let Some(next_state) = cache_key.evaluate_predicate(pred) else {
continue 'cache_loop;
};
+ let mut additional_columns = c.additional_columns.clone();
+ additional_columns.push((&cache_key.column_name, &pred.value));
new_caches.push(ExtendedLastCacheState {
state: next_state,
- additional_columns: c.additional_columns.clone(),
+ additional_columns,
});
} else {
new_caches.extend(cache_key.value_map.iter().map(|(v, state)| {
@@ -825,6 +869,8 @@ struct LastCacheStore {
///
/// [perf]: https://github.com/indexmap-rs/indexmap?tab=readme-ov-file#performance
cache: IndexMap<String, CacheColumn>,
+ /// A reference to the set of key columns for the cache
+ key_columns: Arc<IndexSet<String>>,
/// A ring buffer holding the instants at which entries in the cache were inserted
///
/// This is used to evict cache values that outlive the `ttl`
@@ -845,11 +891,13 @@ impl LastCacheStore {
count: usize,
ttl: Duration,
schema: ArrowSchemaRef,
+ key_columns: Arc<IndexSet<String>>,
series_keys: Option<&HashSet<String>>,
) -> Self {
let cache = schema
.fields()
.iter()
+ .filter(|f| !key_columns.contains(f.name()))
.map(|f| {
(
f.name().to_string(),
@@ -863,6 +911,7 @@ impl LastCacheStore {
.collect();
Self {
cache,
+ key_columns,
instants: VecDeque::with_capacity(count),
count,
ttl,
@@ -888,7 +937,6 @@ impl LastCacheStore {
&mut self,
row: &'a Row,
accept_new_fields: bool,
- key_columns: &IndexSet<String>,
) -> Option<Vec<(&'a str, DataType)>> {
if row.time <= self.last_time.timestamp_nanos() {
return None;
@@ -904,7 +952,7 @@ impl LastCacheStore {
if let Some(col) = self.cache.get_mut(&field.name) {
// In this case, the field already has an entry in the cache, so just push:
col.push(&field.value);
- } else if !key_columns.contains(&field.name) {
+ } else if !self.key_columns.contains(&field.name) {
// In this case, there is not an entry for the field in the cache, so if the
// value is not one of the key columns, then it is a new field being added.
let data_type = data_type_from_buffer_field(field);
@@ -963,11 +1011,14 @@ impl LastCacheStore {
.fields()
.iter()
.cloned()
- .map(|f| {
+ .filter_map(|f| {
if let Some(c) = self.cache.get(f.name()) {
- (f, c.data.as_array())
+ Some((f, c.data.as_array()))
+ } else if self.key_columns.contains(f.name()) {
+ // We prepend key columns with the extended set provided
+ None
} else {
- (Arc::clone(&f), new_null_array(f.data_type(), self.len()))
+ Some((Arc::clone(&f), new_null_array(f.data_type(), self.len())))
}
})
.collect();
@@ -1006,52 +1057,15 @@ impl LastCacheStore {
}
}
-#[async_trait]
-impl TableProvider for LastCache {
- fn as_any(&self) -> &dyn Any {
- self as &dyn Any
- }
-
- fn schema(&self) -> ArrowSchemaRef {
- Arc::clone(&self.schema)
- }
-
- fn table_type(&self) -> TableType {
- TableType::Temporary
- }
-
- fn supports_filters_pushdown(
- &self,
- filters: &[&Expr],
- ) -> DFResult<Vec<TableProviderFilterPushDown>> {
- Ok(vec![TableProviderFilterPushDown::Inexact; filters.len()])
- }
-
- async fn scan(
- &self,
- ctx: &SessionState,
- projection: Option<&Vec<usize>>,
- filters: &[Expr],
- _limit: Option<usize>,
- ) -> DFResult<Arc<dyn ExecutionPlan>> {
- let predicates = self.convert_filter_exprs(filters);
- let partitions = vec![self.to_record_batches(&predicates)?];
- let mut exec = MemoryExec::try_new(&partitions, self.schema(), projection.cloned())?;
-
- let show_sizes = ctx.config_options().explain.show_sizes;
- exec = exec.with_show_sizes(show_sizes);
-
- Ok(Arc::new(exec))
- }
-}
-
/// A column in a [`LastCache`]
///
/// Stores its size so it can evict old data on push. Stores the time-to-live (TTL) in order
/// to remove expired data.
#[derive(Debug)]
struct CacheColumn {
+ /// The number of entries the [`CacheColumn`] will hold before evicting old ones on push
size: usize,
+ /// The buffer containing data for the column
data: CacheColumnData,
}
@@ -1381,11 +1395,11 @@ mod tests {
assert_batches_eq!(
[
- "+--------+-----------------------------+-------+",
- "| region | time | usage |",
- "+--------+-----------------------------+-------+",
- "| us | 1970-01-01T00:00:00.000002Z | 99.0 |",
- "+--------+-----------------------------+-------+",
+ "+------+--------+-----------------------------+-------+",
+ "| host | region | time | usage |",
+ "+------+--------+-----------------------------+-------+",
+ "| a | us | 1970-01-01T00:00:00.000002Z | 99.0 |",
+ "+------+--------+-----------------------------+-------+",
],
&batch
);
@@ -1409,11 +1423,11 @@ mod tests {
assert_batches_eq!(
[
- "+--------+-----------------------------+-------+",
- "| region | time | usage |",
- "+--------+-----------------------------+-------+",
- "| us | 1970-01-01T00:00:00.000003Z | 88.0 |",
- "+--------+-----------------------------+-------+",
+ "+------+--------+-----------------------------+-------+",
+ "| host | region | time | usage |",
+ "+------+--------+-----------------------------+-------+",
+ "| a | us | 1970-01-01T00:00:00.000003Z | 88.0 |",
+ "+------+--------+-----------------------------+-------+",
],
&batch
);
@@ -1497,11 +1511,11 @@ mod tests {
Predicate::new("host", KeyValue::string("c")),
],
expected: &[
- "+-----------------------------+-------+",
- "| time | usage |",
- "+-----------------------------+-------+",
- "| 1970-01-01T00:00:00.000001Z | 60.0 |",
- "+-----------------------------+-------+",
+ "+--------+------+-----------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+-----------------------------+-------+",
+ "| us | c | 1970-01-01T00:00:00.000001Z | 60.0 |",
+ "+--------+------+-----------------------------+-------+",
],
},
// Predicate on only region key column will have host column outputted in addition to
@@ -1509,26 +1523,26 @@ mod tests {
TestCase {
predicates: &[Predicate::new("region", KeyValue::string("us"))],
expected: &[
- "+------+-----------------------------+-------+",
- "| host | time | usage |",
- "+------+-----------------------------+-------+",
- "| a | 1970-01-01T00:00:00.000001Z | 100.0 |",
- "| c | 1970-01-01T00:00:00.000001Z | 60.0 |",
- "| b | 1970-01-01T00:00:00.000001Z | 80.0 |",
- "+------+-----------------------------+-------+",
+ "+--------+------+-----------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+-----------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |",
+ "| us | b | 1970-01-01T00:00:00.000001Z | 80.0 |",
+ "| us | c | 1970-01-01T00:00:00.000001Z | 60.0 |",
+ "+--------+------+-----------------------------+-------+",
],
},
// Similar to previous, with a different region predicate:
TestCase {
predicates: &[Predicate::new("region", KeyValue::string("ca"))],
expected: &[
- "+------+-----------------------------+-------+",
- "| host | time | usage |",
- "+------+-----------------------------+-------+",
- "| d | 1970-01-01T00:00:00.000001Z | 40.0 |",
- "| e | 1970-01-01T00:00:00.000001Z | 20.0 |",
- "| f | 1970-01-01T00:00:00.000001Z | 30.0 |",
- "+------+-----------------------------+-------+",
+ "+--------+------+-----------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+-----------------------------+-------+",
+ "| ca | d | 1970-01-01T00:00:00.000001Z | 40.0 |",
+ "| ca | e | 1970-01-01T00:00:00.000001Z | 20.0 |",
+ "| ca | f | 1970-01-01T00:00:00.000001Z | 30.0 |",
+ "+--------+------+-----------------------------+-------+",
],
},
// Predicate on only host key column will have region column outputted in addition to
@@ -1536,11 +1550,11 @@ mod tests {
TestCase {
predicates: &[Predicate::new("host", KeyValue::string("a"))],
expected: &[
- "+--------+-----------------------------+-------+",
- "| region | time | usage |",
- "+--------+-----------------------------+-------+",
- "| us | 1970-01-01T00:00:00.000001Z | 100.0 |",
- "+--------+-----------------------------+-------+",
+ "+--------+------+-----------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+-----------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |",
+ "+--------+------+-----------------------------+-------+",
],
},
// Omitting all key columns from the predicate will have all key columns included in
@@ -1704,57 +1718,57 @@ mod tests {
Predicate::new("host", KeyValue::string("a")),
],
expected: &[
- "+--------------------------------+-------+",
- "| time | usage |",
- "+--------------------------------+-------+",
- "| 1970-01-01T00:00:00.000001500Z | 99.0 |",
- "| 1970-01-01T00:00:00.000001Z | 100.0 |",
- "| 1970-01-01T00:00:00.000002500Z | 90.0 |",
- "| 1970-01-01T00:00:00.000002Z | 95.0 |",
- "+--------------------------------+-------+",
+ "+--------+------+--------------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+--------------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.000001500Z | 99.0 |",
+ "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |",
+ "| us | a | 1970-01-01T00:00:00.000002500Z | 90.0 |",
+ "| us | a | 1970-01-01T00:00:00.000002Z | 95.0 |",
+ "+--------+------+--------------------------------+-------+",
],
},
TestCase {
predicates: &[Predicate::new("region", KeyValue::string("us"))],
expected: &[
- "+------+--------------------------------+-------+",
- "| host | time | usage |",
- "+------+--------------------------------+-------+",
- "| a | 1970-01-01T00:00:00.000001500Z | 99.0 |",
- "| a | 1970-01-01T00:00:00.000001Z | 100.0 |",
- "| a | 1970-01-01T00:00:00.000002500Z | 90.0 |",
- "| a | 1970-01-01T00:00:00.000002Z | 95.0 |",
- "| b | 1970-01-01T00:00:00.000001500Z | 88.0 |",
- "| b | 1970-01-01T00:00:00.000001Z | 80.0 |",
- "| b | 1970-01-01T00:00:00.000002500Z | 99.0 |",
- "| b | 1970-01-01T00:00:00.000002Z | 92.0 |",
- "+------+--------------------------------+-------+",
+ "+--------+------+--------------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+--------------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.000001500Z | 99.0 |",
+ "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |",
+ "| us | a | 1970-01-01T00:00:00.000002500Z | 90.0 |",
+ "| us | a | 1970-01-01T00:00:00.000002Z | 95.0 |",
+ "| us | b | 1970-01-01T00:00:00.000001500Z | 88.0 |",
+ "| us | b | 1970-01-01T00:00:00.000001Z | 80.0 |",
+ "| us | b | 1970-01-01T00:00:00.000002500Z | 99.0 |",
+ "| us | b | 1970-01-01T00:00:00.000002Z | 92.0 |",
+ "+--------+------+--------------------------------+-------+",
],
},
TestCase {
predicates: &[Predicate::new("host", KeyValue::string("a"))],
expected: &[
- "+--------+--------------------------------+-------+",
- "| region | time | usage |",
- "+--------+--------------------------------+-------+",
- "| us | 1970-01-01T00:00:00.000001500Z | 99.0 |",
- "| us | 1970-01-01T00:00:00.000001Z | 100.0 |",
- "| us | 1970-01-01T00:00:00.000002500Z | 90.0 |",
- "| us | 1970-01-01T00:00:00.000002Z | 95.0 |",
- "+--------+--------------------------------+-------+",
+ "+--------+------+--------------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+--------------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.000001500Z | 99.0 |",
+ "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |",
+ "| us | a | 1970-01-01T00:00:00.000002500Z | 90.0 |",
+ "| us | a | 1970-01-01T00:00:00.000002Z | 95.0 |",
+ "+--------+------+--------------------------------+-------+",
],
},
TestCase {
predicates: &[Predicate::new("host", KeyValue::string("b"))],
expected: &[
- "+--------+--------------------------------+-------+",
- "| region | time | usage |",
- "+--------+--------------------------------+-------+",
- "| us | 1970-01-01T00:00:00.000001500Z | 88.0 |",
- "| us | 1970-01-01T00:00:00.000001Z | 80.0 |",
- "| us | 1970-01-01T00:00:00.000002500Z | 99.0 |",
- "| us | 1970-01-01T00:00:00.000002Z | 92.0 |",
- "+--------+--------------------------------+-------+",
+ "+--------+------+--------------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+--------------------------------+-------+",
+ "| us | b | 1970-01-01T00:00:00.000001500Z | 88.0 |",
+ "| us | b | 1970-01-01T00:00:00.000001Z | 80.0 |",
+ "| us | b | 1970-01-01T00:00:00.000002500Z | 99.0 |",
+ "| us | b | 1970-01-01T00:00:00.000002Z | 92.0 |",
+ "+--------+------+--------------------------------+-------+",
],
},
TestCase {
@@ -1853,11 +1867,11 @@ mod tests {
assert_batches_sorted_eq!(
[
- "+-----------------------------+-------+",
- "| time | usage |",
- "+-----------------------------+-------+",
- "| 1970-01-01T00:00:00.000001Z | 100.0 |",
- "+-----------------------------+-------+",
+ "+--------+------+-----------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+-----------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |",
+ "+--------+------+-----------------------------+-------+",
],
&batches
);
@@ -1903,11 +1917,11 @@ mod tests {
assert_batches_sorted_eq!(
[
- "+--------+--------------------------+-------+",
- "| region | time | usage |",
- "+--------+--------------------------+-------+",
- "| us | 1970-01-01T00:00:00.500Z | 333.0 |",
- "+--------+--------------------------+-------+",
+ "+--------+------+--------------------------+-------+",
+ "| region | host | time | usage |",
+ "+--------+------+--------------------------+-------+",
+ "| us | a | 1970-01-01T00:00:00.500Z | 333.0 |",
+ "+--------+------+--------------------------+-------+",
],
&batches
);
@@ -1997,36 +2011,36 @@ mod tests {
TestCase {
predicates: &[Predicate::new("component_id", KeyValue::string("333"))],
expected: &[
- "+--------+--------+------+---------+-----------------------------+",
- "| active | type | loc | reading | time |",
- "+--------+--------+------+---------+-----------------------------+",
- "| true | camera | fore | 145.0 | 1970-01-01T00:00:00.000001Z |",
- "+--------+--------+------+---------+-----------------------------+",
+ "+--------------+--------+--------+------+---------+-----------------------------+",
+ "| component_id | active | type | loc | reading | time |",
+ "+--------------+--------+--------+------+---------+-----------------------------+",
+ "| 333 | true | camera | fore | 145.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------------+--------+--------+------+---------+-----------------------------+",
],
},
// Predicate on a non-string field key:
TestCase {
predicates: &[Predicate::new("active", KeyValue::Bool(false))],
expected: &[
- "+--------------+-------------+---------+---------+-----------------------------+",
- "| component_id | type | loc | reading | time |",
- "+--------------+-------------+---------+---------+-----------------------------+",
- "| 555 | solar-panel | huygens | 200.0 | 1970-01-01T00:00:00.000001Z |",
- "| 666 | comms-dish | huygens | 220.0 | 1970-01-01T00:00:00.000001Z |",
- "+--------------+-------------+---------+---------+-----------------------------+",
+ "+--------------+--------+-------------+---------+---------+-----------------------------+",
+ "| component_id | active | type | loc | reading | time |",
+ "+--------------+--------+-------------+---------+---------+-----------------------------+",
+ "| 555 | false | solar-panel | huygens | 200.0 | 1970-01-01T00:00:00.000001Z |",
+ "| 666 | false | comms-dish | huygens | 220.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------------+--------+-------------+---------+---------+-----------------------------+",
],
},
// Predicate on a string field key:
TestCase {
predicates: &[Predicate::new("type", KeyValue::string("camera"))],
expected: &[
- "+--------------+--------+-----------+---------+-----------------------------+",
- "| component_id | active | loc | reading | time |",
- "+--------------+--------+-----------+---------+-----------------------------+",
- "| 111 | true | port | 150.0 | 1970-01-01T00:00:00.000001Z |",
- "| 222 | true | starboard | 250.0 | 1970-01-01T00:00:00.000001Z |",
- "| 333 | true | fore | 145.0 | 1970-01-01T00:00:00.000001Z |",
- "+--------------+--------+-----------+---------+-----------------------------+",
+ "+--------------+--------+--------+-----------+---------+-----------------------------+",
+ "| component_id | active | type | loc | reading | time |",
+ "+--------------+--------+--------+-----------+---------+-----------------------------+",
+ "| 111 | true | camera | port | 150.0 | 1970-01-01T00:00:00.000001Z |",
+ "| 222 | true | camera | starboard | 250.0 | 1970-01-01T00:00:00.000001Z |",
+ "| 333 | true | camera | fore | 145.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------------+--------+--------+-----------+---------+-----------------------------+",
],
}
];
@@ -2110,39 +2124,39 @@ mod tests {
TestCase {
predicates: &[Predicate::new("state", KeyValue::string("ca"))],
expected: &[
- "+--------+-------+-------+-----------------------------+",
- "| county | farm | speed | time |",
- "+--------+-------+-------+-----------------------------+",
- "| napa | 10-01 | 50.0 | 1970-01-01T00:00:00.000001Z |",
- "| napa | 10-02 | 49.0 | 1970-01-01T00:00:00.000001Z |",
- "| nevada | 40-01 | 66.0 | 1970-01-01T00:00:00.000001Z |",
- "| orange | 20-01 | 40.0 | 1970-01-01T00:00:00.000001Z |",
- "| orange | 20-02 | 33.0 | 1970-01-01T00:00:00.000001Z |",
- "| yolo | 30-01 | 62.0 | 1970-01-01T00:00:00.000001Z |",
- "+--------+-------+-------+-----------------------------+",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| state | county | farm | speed | time |",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| ca | napa | 10-01 | 50.0 | 1970-01-01T00:00:00.000001Z |",
+ "| ca | napa | 10-02 | 49.0 | 1970-01-01T00:00:00.000001Z |",
+ "| ca | nevada | 40-01 | 66.0 | 1970-01-01T00:00:00.000001Z |",
+ "| ca | orange | 20-01 | 40.0 | 1970-01-01T00:00:00.000001Z |",
+ "| ca | orange | 20-02 | 33.0 | 1970-01-01T00:00:00.000001Z |",
+ "| ca | yolo | 30-01 | 62.0 | 1970-01-01T00:00:00.000001Z |",
+ "+-------+--------+-------+-------+-----------------------------+",
],
},
// Predicate on county column, which is part of the series key:
TestCase {
predicates: &[Predicate::new("county", KeyValue::string("napa"))],
expected: &[
- "+-------+-------+-------+-----------------------------+",
- "| state | farm | speed | time |",
- "+-------+-------+-------+-----------------------------+",
- "| ca | 10-01 | 50.0 | 1970-01-01T00:00:00.000001Z |",
- "| ca | 10-02 | 49.0 | 1970-01-01T00:00:00.000001Z |",
- "+-------+-------+-------+-----------------------------+",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| state | county | farm | speed | time |",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| ca | napa | 10-01 | 50.0 | 1970-01-01T00:00:00.000001Z |",
+ "| ca | napa | 10-02 | 49.0 | 1970-01-01T00:00:00.000001Z |",
+ "+-------+--------+-------+-------+-----------------------------+",
],
},
// Predicate on farm column, which is part of the series key:
TestCase {
predicates: &[Predicate::new("farm", KeyValue::string("30-01"))],
expected: &[
- "+-------+--------+-------+-----------------------------+",
- "| state | county | speed | time |",
- "+-------+--------+-------+-----------------------------+",
- "| ca | yolo | 62.0 | 1970-01-01T00:00:00.000001Z |",
- "+-------+--------+-------+-----------------------------+",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| state | county | farm | speed | time |",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| ca | yolo | 30-01 | 62.0 | 1970-01-01T00:00:00.000001Z |",
+ "+-------+--------+-------+-------+-----------------------------+",
],
},
// Predicate on all series key columns:
@@ -2153,11 +2167,11 @@ mod tests {
Predicate::new("farm", KeyValue::string("40-01")),
],
expected: &[
- "+-------+-----------------------------+",
- "| speed | time |",
- "+-------+-----------------------------+",
- "| 66.0 | 1970-01-01T00:00:00.000001Z |",
- "+-------+-----------------------------+",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| state | county | farm | speed | time |",
+ "+-------+--------+-------+-------+-----------------------------+",
+ "| ca | nevada | 40-01 | 66.0 | 1970-01-01T00:00:00.000001Z |",
+ "+-------+--------+-------+-------+-----------------------------+",
],
},
];
@@ -2241,39 +2255,39 @@ mod tests {
TestCase {
predicates: &[Predicate::new("state", KeyValue::string("ca"))],
expected: &[
- "+--------+-------+-------+-----------------------------+",
- "| county | farm | speed | time |",
- "+--------+-------+-------+-----------------------------+",
- "| napa | 10-01 | 50.0 | 1970-01-01T00:00:00.000001Z |",
- "| napa | 10-02 | 49.0 | 1970-01-01T00:00:00.000001Z |",
- "| nevada | 40-01 | 66.0 | 1970-01-01T00:00:00.000001Z |",
- "| orange | 20-01 | 40.0 | 1970-01-01T00:00:00.000001Z |",
- "| orange | 20-02 | 33.0 | 1970-01-01T00:00:00.000001Z |",
- "| yolo | 30-01 | 62.0 | 1970-01-01T00:00:00.000001Z |",
- "+--------+-------+-------+-----------------------------+",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| county | farm | state | speed | time |",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| napa | 10-01 | ca | 50.0 | 1970-01-01T00:00:00.000001Z |",
+ "| napa | 10-02 | ca | 49.0 | 1970-01-01T00:00:00.000001Z |",
+ "| nevada | 40-01 | ca | 66.0 | 1970-01-01T00:00:00.000001Z |",
+ "| orange | 20-01 | ca | 40.0 | 1970-01-01T00:00:00.000001Z |",
+ "| orange | 20-02 | ca | 33.0 | 1970-01-01T00:00:00.000001Z |",
+ "| yolo | 30-01 | ca | 62.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------+-------+-------+-------+-----------------------------+",
],
},
// Predicate on county column, which is part of the series key:
TestCase {
predicates: &[Predicate::new("county", KeyValue::string("napa"))],
expected: &[
- "+-------+-------+-------+-----------------------------+",
- "| farm | state | speed | time |",
- "+-------+-------+-------+-----------------------------+",
- "| 10-01 | ca | 50.0 | 1970-01-01T00:00:00.000001Z |",
- "| 10-02 | ca | 49.0 | 1970-01-01T00:00:00.000001Z |",
- "+-------+-------+-------+-----------------------------+",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| county | farm | state | speed | time |",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| napa | 10-01 | ca | 50.0 | 1970-01-01T00:00:00.000001Z |",
+ "| napa | 10-02 | ca | 49.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------+-------+-------+-------+-----------------------------+",
],
},
// Predicate on farm column, which is part of the series key:
TestCase {
predicates: &[Predicate::new("farm", KeyValue::string("30-01"))],
expected: &[
- "+--------+-------+-------+-----------------------------+",
- "| county | state | speed | time |",
- "+--------+-------+-------+-----------------------------+",
- "| yolo | ca | 62.0 | 1970-01-01T00:00:00.000001Z |",
- "+--------+-------+-------+-----------------------------+",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| county | farm | state | speed | time |",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| yolo | 30-01 | ca | 62.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------+-------+-------+-------+-----------------------------+",
],
},
// Predicate on all series key columns:
@@ -2284,11 +2298,11 @@ mod tests {
Predicate::new("farm", KeyValue::string("40-01")),
],
expected: &[
- "+-------+-----------------------------+",
- "| speed | time |",
- "+-------+-----------------------------+",
- "| 66.0 | 1970-01-01T00:00:00.000001Z |",
- "+-------+-----------------------------+",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| county | farm | state | speed | time |",
+ "+--------+-------+-------+-------+-----------------------------+",
+ "| nevada | 40-01 | ca | 66.0 | 1970-01-01T00:00:00.000001Z |",
+ "+--------+-------+-------+-------+-----------------------------+",
],
},
];
@@ -2421,22 +2435,22 @@ mod tests {
TestCase {
predicates: &[Predicate::new("game_id", KeyValue::string("4"))],
expected: &[
- "+-----------+-----------------------------+------+------+",
- "| player | time | type | zone |",
- "+-----------+-----------------------------+------+------+",
- "| bobrovsky | 1970-01-01T00:00:00.000001Z | save | home |",
- "+-----------+-----------------------------+------+------+",
+ "+---------+-----------+-----------------------------+------+------+",
+ "| game_id | player | time | type | zone |",
+ "+---------+-----------+-----------------------------+------+------+",
+ "| 4 | bobrovsky | 1970-01-01T00:00:00.000001Z | save | home |",
+ "+---------+-----------+-----------------------------+------+------+",
],
},
// Cache that does not have a zone column will produce it with nulls:
TestCase {
predicates: &[Predicate::new("game_id", KeyValue::string("1"))],
expected: &[
- "+-----------+-----------------------------+------+------+",
- "| player | time | type | zone |",
- "+-----------+-----------------------------+------+------+",
- "| mackinnon | 1970-01-01T00:00:00.000001Z | shot | |",
- "+-----------+-----------------------------+------+------+",
+ "+---------+-----------+-----------------------------+------+------+",
+ "| game_id | player | time | type | zone |",
+ "+---------+-----------+-----------------------------+------+------+",
+ "| 1 | mackinnon | 1970-01-01T00:00:00.000001Z | shot | |",
+ "+---------+-----------+-----------------------------+------+------+",
],
},
// Pulling from multiple caches will fill in with nulls:
@@ -2545,31 +2559,31 @@ mod tests {
TestCase {
predicates: &[Predicate::new("t1", KeyValue::string("a"))],
expected: &[
- "+-----+--------------------------------+-----+-----+-----+",
- "| f1 | time | f2 | f3 | f4 |",
- "+-----+--------------------------------+-----+-----+-----+",
- "| 1.0 | 1970-01-01T00:00:00.000001500Z | 2.0 | 3.0 | 4.0 |",
- "+-----+--------------------------------+-----+-----+-----+",
+ "+----+-----+--------------------------------+-----+-----+-----+",
+ "| t1 | f1 | time | f2 | f3 | f4 |",
+ "+----+-----+--------------------------------+-----+-----+-----+",
+ "| a | 1.0 | 1970-01-01T00:00:00.000001500Z | 2.0 | 3.0 | 4.0 |",
+ "+----+-----+--------------------------------+-----+-----+-----+",
],
},
TestCase {
predicates: &[Predicate::new("t1", KeyValue::string("b"))],
expected: &[
- "+------+--------------------------------+----+------+------+",
- "| f1 | time | f2 | f3 | f4 |",
- "+------+--------------------------------+----+------+------+",
- "| 10.0 | 1970-01-01T00:00:00.000001500Z | | 30.0 | 40.0 |",
- "+------+--------------------------------+----+------+------+",
+ "+----+------+--------------------------------+----+------+------+",
+ "| t1 | f1 | time | f2 | f3 | f4 |",
+ "+----+------+--------------------------------+----+------+------+",
+ "| b | 10.0 | 1970-01-01T00:00:00.000001500Z | | 30.0 | 40.0 |",
+ "+----+------+--------------------------------+----+------+------+",
],
},
TestCase {
predicates: &[Predicate::new("t1", KeyValue::string("c"))],
expected: &[
- "+-------+--------------------------------+-------+-------+----+",
- "| f1 | time | f2 | f3 | f4 |",
- "+-------+--------------------------------+-------+-------+----+",
- "| 100.0 | 1970-01-01T00:00:00.000001500Z | 200.0 | 300.0 | |",
- "+-------+--------------------------------+-------+-------+----+",
+ "+----+-------+--------------------------------+-------+-------+----+",
+ "| t1 | f1 | time | f2 | f3 | f4 |",
+ "+----+-------+--------------------------------+-------+-------+----+",
+ "| c | 100.0 | 1970-01-01T00:00:00.000001500Z | 200.0 | 300.0 | |",
+ "+----+-------+--------------------------------+-------+-------+----+",
],
},
// Can query accross key column values:
diff --git a/influxdb3_write/src/last_cache/table_function.rs b/influxdb3_write/src/last_cache/table_function.rs
new file mode 100644
index 0000000000..094987eafc
--- /dev/null
+++ b/influxdb3_write/src/last_cache/table_function.rs
@@ -0,0 +1,117 @@
+use std::{any::Any, sync::Arc};
+
+use arrow::datatypes::SchemaRef;
+use async_trait::async_trait;
+use datafusion::{
+ common::{plan_err, Result},
+ datasource::{function::TableFunctionImpl, TableProvider, TableType},
+ execution::context::SessionState,
+ logical_expr::{Expr, TableProviderFilterPushDown},
+ physical_plan::{memory::MemoryExec, ExecutionPlan},
+ scalar::ScalarValue,
+};
+
+use super::LastCacheProvider;
+
+struct LastCacheFunctionProvider {
+ db_name: String,
+ table_name: String,
+ cache_name: String,
+ schema: SchemaRef,
+ provider: Arc<LastCacheProvider>,
+}
+
+#[async_trait]
+impl TableProvider for LastCacheFunctionProvider {
+ fn as_any(&self) -> &dyn Any {
+ self as &dyn Any
+ }
+
+ fn schema(&self) -> SchemaRef {
+ Arc::clone(&self.schema)
+ }
+
+ fn table_type(&self) -> TableType {
+ TableType::Temporary
+ }
+
+ fn supports_filters_pushdown(
+ &self,
+ filters: &[&Expr],
+ ) -> Result<Vec<TableProviderFilterPushDown>> {
+ Ok(vec![TableProviderFilterPushDown::Inexact; filters.len()])
+ }
+
+ async fn scan(
+ &self,
+ ctx: &SessionState,
+ projection: Option<&Vec<usize>>,
+ filters: &[Expr],
+ _limit: Option<usize>,
+ ) -> Result<Arc<dyn ExecutionPlan>> {
+ let read = self.provider.cache_map.read();
+ let batches = if let Some(cache) = read
+ .get(&self.db_name)
+ .and_then(|db| db.get(&self.table_name))
+ .and_then(|tbl| tbl.get(&self.cache_name))
+ {
+ let predicates = cache.convert_filter_exprs(filters);
+ cache.to_record_batches(&predicates)?
+ } else {
+ // If there is no cache, it means that it was removed, in which case, we just return
+ // an empty set of record batches.
+ vec![]
+ };
+ let mut exec = MemoryExec::try_new(&[batches], self.schema(), projection.cloned())?;
+
+ let show_sizes = ctx.config_options().explain.show_sizes;
+ exec = exec.with_show_sizes(show_sizes);
+
+ Ok(Arc::new(exec))
+ }
+}
+
+pub struct LastCacheFunction {
+ db_name: String,
+ provider: Arc<LastCacheProvider>,
+}
+
+impl LastCacheFunction {
+ pub fn new(db_name: impl Into<String>, provider: Arc<LastCacheProvider>) -> Self {
+ Self {
+ db_name: db_name.into(),
+ provider,
+ }
+ }
+}
+
+impl TableFunctionImpl for LastCacheFunction {
+ fn call(&self, args: &[Expr]) -> Result<Arc<dyn TableProvider>> {
+ let Some(Expr::Literal(ScalarValue::Utf8(Some(table_name)))) = args.first() else {
+ return plan_err!("first argument must be the table name as a string");
+ };
+
+ let cache_name = match args.get(1) {
+ Some(Expr::Literal(ScalarValue::Utf8(Some(name)))) => Some(name),
+ Some(_) => {
+ return plan_err!("second argument, if passed, must be the cache name as a string")
+ }
+ None => None,
+ };
+
+ match self.provider.get_cache_name_and_schema(
+ &self.db_name,
+ table_name,
+ cache_name.map(|x| x.as_str()),
+ ) {
+ Some((cache_name, schema)) => Ok(Arc::new(LastCacheFunctionProvider {
+ db_name: self.db_name.clone(),
+ table_name: table_name.clone(),
+ cache_name,
+ schema,
+ provider: Arc::clone(&self.provider),
+ })),
+ None => plan_err!("could not find cache for the given arguments"),
+ }
+ }
+}
diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs
index 9155fab042..174e1fe685 100644
--- a/influxdb3_write/src/lib.rs
+++ b/influxdb3_write/src/lib.rs
@@ -9,7 +9,7 @@
pub mod cache;
pub mod catalog;
mod chunk;
-mod last_cache;
+pub mod last_cache;
pub mod paths;
pub mod persister;
pub mod wal;
|
1d440ddb2dcf6c53e12f9fb20c8e561f353f9b1f
|
Marco Neumann
|
2022-10-24 13:34:22
|
`IOxReadFilterNode` can always accumulate statistics (#5954)
|
* refactor: `IOxReadFilterNode` can always accumulate statistics
`IOxReadFilterNode` used to not emit statistics if one chunk has
duplicates or delete predicates. This is wrong (or at least overly
conservative), because the node itself (or the chunks themselves) do NOT
perform dedup or delete predicate filtering. Instead this is done
by parent nodes (`DeduplicateExec` and `FilterExec`), and it is their
job to propagate statistics correctly.
Helps w/ #5897.
* test: explain setup
Co-authored-by: Andrew Lamb <[email protected]>
|
Co-authored-by: Andrew Lamb <[email protected]>
|
refactor: `IOxReadFilterNode` can always accumulate statistics (#5954)
* refactor: `IOxReadFilterNode` can always accumulate statistics
`IOxReadFilterNode` used to not emit statistics if one chunk has
duplicates or delete predicates. This is wrong (or at least overly
conservative), because the node itself (or the chunks themselves) do NOT
perform dedup or delete predicate filtering. Instead this is done
by parent nodes (`DeduplicateExec` and `FilterExec`), and it is their
job to propagate statistics correctly.
Helps w/ #5897.
* test: explain setup
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: Andrew Lamb <[email protected]>
|
diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs
index 38d22746e2..67879c99cc 100644
--- a/influxdb_iox/tests/end_to_end_cases/querier.rs
+++ b/influxdb_iox/tests/end_to_end_cases/querier.rs
@@ -164,7 +164,8 @@ async fn query_after_persist_sees_new_files() {
],
},
// write another parquet file
- Step::WriteLineProtocol(setup.lp_to_force_persistence()),
+ // that has non duplicated data
+ Step::WriteLineProtocol(setup.lp_to_force_persistence().replace("tag=A", "tag=B")),
Step::WaitForPersisted,
// query should correctly see the data in the second parquet file
Step::Query {
diff --git a/iox_query/src/provider/deduplicate.rs b/iox_query/src/provider/deduplicate.rs
index 57ff8cdca4..2f765e552d 100644
--- a/iox_query/src/provider/deduplicate.rs
+++ b/iox_query/src/provider/deduplicate.rs
@@ -237,15 +237,9 @@ impl ExecutionPlan for DeduplicateExec {
}
fn statistics(&self) -> Statistics {
- // TODO: we should acount for overlaps at this point -- if
- // there is overlap across the chunks, we probably can't
- // provide exact statistics without more work
- let is_exact = true;
-
- // for now, pass on the input statistics but note they can not
- // be exact
+ // use a guess from our input but they are NOT exact
Statistics {
- is_exact,
+ is_exact: false,
..self.input.statistics()
}
}
diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs
index e283e1dfaa..1672dbc70c 100644
--- a/iox_query/src/provider/physical.rs
+++ b/iox_query/src/provider/physical.rs
@@ -174,12 +174,6 @@ impl ExecutionPlan for IOxReadFilterNode {
fn statistics(&self) -> Statistics {
let mut combined_summary_option: Option<TableSummary> = None;
for chunk in &self.chunks {
- if chunk.has_delete_predicates() || chunk.may_contain_pk_duplicates() {
- // Not use statistics if there is at least one delete predicate or
- // if chunk may have duplicates
- return Statistics::default();
- }
-
combined_summary_option = match combined_summary_option {
None => Some(
chunk
diff --git a/query_tests/cases/in/duplicates_ingester.expected b/query_tests/cases/in/duplicates_ingester.expected
index fcd47819ad..25cc6e96e8 100644
--- a/query_tests/cases/in/duplicates_ingester.expected
+++ b/query_tests/cases/in/duplicates_ingester.expected
@@ -83,3 +83,9 @@
| | IOxReadFilterNode: table_name=h2o, chunks=1 predicate=Predicate |
| | |
+---------------+---------------------------------------------------------------------------------+
+-- SQL: select count(*) from h2o;
++-----------------+
+| COUNT(UInt8(1)) |
++-----------------+
+| 18 |
++-----------------+
diff --git a/query_tests/cases/in/duplicates_ingester.sql b/query_tests/cases/in/duplicates_ingester.sql
index 9ea662b11f..c906978c7b 100644
--- a/query_tests/cases/in/duplicates_ingester.sql
+++ b/query_tests/cases/in/duplicates_ingester.sql
@@ -10,3 +10,6 @@ EXPLAIN select time, state, city, min_temp, max_temp, area from h2o;
-- Union plan
EXPLAIN select state as name from h2o UNION ALL select city as name from h2o;
+
+-- count(*) plan that ensures that row count statistics are not used (because we don't know how many rows overlap)
+select count(*) from h2o;
diff --git a/query_tests/cases/in/duplicates_parquet.expected b/query_tests/cases/in/duplicates_parquet.expected
index cfb8a814b9..29f0d42e2a 100644
--- a/query_tests/cases/in/duplicates_parquet.expected
+++ b/query_tests/cases/in/duplicates_parquet.expected
@@ -67,3 +67,9 @@
| | IOxReadFilterNode: table_name=h2o, chunks=2 predicate=Predicate |
| | |
+---------------+---------------------------------------------------------------------------------+
+-- SQL: select count(*) from h2o;
++-----------------+
+| COUNT(UInt8(1)) |
++-----------------+
+| 18 |
++-----------------+
diff --git a/query_tests/cases/in/duplicates_parquet.sql b/query_tests/cases/in/duplicates_parquet.sql
index 4b0e5f6fd0..b7f15e1b44 100644
--- a/query_tests/cases/in/duplicates_parquet.sql
+++ b/query_tests/cases/in/duplicates_parquet.sql
@@ -10,3 +10,6 @@ EXPLAIN select time, state, city, min_temp, max_temp, area from h2o;
-- Union plan
EXPLAIN select state as name from h2o UNION ALL select city as name from h2o;
+
+-- count(*) plan that ensures that row count statistics are not used (because we don't know how many rows overlap)
+select count(*) from h2o;
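The reasoning behind the `is_exact: false` change above can be illustrated outside of DataFusion. Below is a minimal, self-contained Rust sketch; the `ChunkStats` type and the numbers are hypothetical stand-ins, not types from the IOx codebase. It shows why summing per-chunk row counts over chunks that may contain duplicate primary keys yields only an upper bound, so a count(*) plan must not treat those statistics as exact. This is the scenario the new count(*) queries in duplicates_ingester.sql and duplicates_parquet.sql guard against.

/// Hypothetical per-chunk summary used only for this illustration.
struct ChunkStats {
    row_count: u64,
    may_contain_duplicates: bool,
}

/// Combine per-chunk row counts the way a scan node could: the sum is exact
/// only if no chunk may contain duplicate primary keys; otherwise it is an
/// upper bound that a later deduplication step can shrink.
fn combined_row_count(chunks: &[ChunkStats]) -> (u64, bool) {
    let total: u64 = chunks.iter().map(|c| c.row_count).sum();
    let is_exact = chunks.iter().all(|c| !c.may_contain_duplicates);
    (total, is_exact)
}

fn main() {
    let chunks = [
        ChunkStats { row_count: 7, may_contain_duplicates: false },
        ChunkStats { row_count: 5, may_contain_duplicates: true },
    ];
    let (total, is_exact) = combined_row_count(&chunks);
    // Prints "12 rows (exact: false)": the scan may still report a combined
    // count, but it is only an upper bound until deduplication has run.
    println!("{total} rows (exact: {is_exact})");
}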
|
c03a5c7c14e408b61919bc1feb2b511a2cde7fa9
|
Marco Neumann
|
2023-04-06 12:01:39
|
tracing span hierarchy in querier (#7469)
|
The span for the individual chunk creations should be under
"create individual chunks".
| null |
fix: tracing span hierarchy in querier (#7469)
The span for the individual chunk creations should be under
"create individual chunks".
|
diff --git a/querier/src/parquet/creation.rs b/querier/src/parquet/creation.rs
index e378276c06..c3856cf312 100644
--- a/querier/src/parquet/creation.rs
+++ b/querier/src/parquet/creation.rs
@@ -137,7 +137,7 @@ impl ChunkAdapter {
}
{
- let _span_recorder = span_recorder.child("create individual chunks");
+ let span_recorder = span_recorder.child("create individual chunks");
futures::stream::iter(parquet_files)
.map(|cached_parquet_file| {
|
08cf71e0acb26c2cd2efd07343d2546bcab0efe4
|
Dom Dwyer
|
2023-02-09 12:38:28
|
move PersistService to new file
|
Separate the PersistService into its own file.
| null |
refactor(proto): move PersistService to new file
Separate the PersistService into its own file.
|
diff --git a/generated_types/build.rs b/generated_types/build.rs
index bd2c190598..a511146577 100644
--- a/generated_types/build.rs
+++ b/generated_types/build.rs
@@ -59,6 +59,7 @@ fn generate_grpc_types(root: &Path) -> Result<()> {
ingester_path.join("write_info.proto"),
ingester_path.join("write.proto"),
ingester_path.join("replication.proto"),
+ ingester_path.join("persist.proto"),
namespace_path.join("service.proto"),
object_store_path.join("service.proto"),
predicate_path.join("predicate.proto"),
diff --git a/generated_types/protos/influxdata/iox/ingester/v1/persist.proto b/generated_types/protos/influxdata/iox/ingester/v1/persist.proto
new file mode 100644
index 0000000000..2cd62022a2
--- /dev/null
+++ b/generated_types/protos/influxdata/iox/ingester/v1/persist.proto
@@ -0,0 +1,14 @@
+syntax = "proto3";
+package influxdata.iox.ingester.v1;
+option go_package = "github.com/influxdata/iox/ingester/v1";
+
+service PersistService {
+ rpc Persist(PersistRequest) returns (PersistResponse);
+}
+
+message PersistRequest {
+ // The namespace to persist
+ string namespace = 1;
+}
+
+message PersistResponse {}
diff --git a/generated_types/protos/influxdata/iox/ingester/v1/write.proto b/generated_types/protos/influxdata/iox/ingester/v1/write.proto
index 2737124889..2b2c4fe6d1 100644
--- a/generated_types/protos/influxdata/iox/ingester/v1/write.proto
+++ b/generated_types/protos/influxdata/iox/ingester/v1/write.proto
@@ -14,13 +14,3 @@ message WriteRequest {
message WriteResponse {}
-service PersistService {
- rpc Persist(PersistRequest) returns (PersistResponse);
-}
-
-message PersistRequest {
- // The namespace to persist
- string namespace = 1;
-}
-
-message PersistResponse {}
|
56ba3b17ded9ba0b6eb158d37318eeca6c4904d6
|
Carol (Nichols || Goulding)
|
2022-12-21 11:19:10
|
Allow partitions from ingesters to overlap in RPC write mode
|
This was added in c82d0d8ca6dc02dcdd40a4c656a1ee51f3f9bfee with the
comment:
> Right now this would clearly indicate a bug and before I am trying to
> understand some prod issues, I wanna rule that one out.
In the RPC write path, this isn't a bug, it's quite expected.
| null |
fix: Allow partitions from ingesters to overlap in RPC write mode
This was added in c82d0d8ca6dc02dcdd40a4c656a1ee51f3f9bfee with the
comment:
> Right now this would clearly indicate a bug and before I am trying to
> understand some prod issues, I wanna rule that one out.
In the RPC write path, this isn't a bug, it's quite expected.
|
diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs
index 0c8b5fc86a..bd923c28ed 100644
--- a/querier/src/table/mod.rs
+++ b/querier/src/table/mod.rs
@@ -478,19 +478,21 @@ impl QuerierTable {
let partitions = partitions_result?;
- // check that partitions from ingesters don't overlap
- let mut seen = HashMap::with_capacity(partitions.len());
- for partition in &partitions {
- match seen.entry(partition.partition_id()) {
- Entry::Occupied(o) => {
- return Err(Error::IngestersOverlap {
- ingester1: Arc::clone(o.get()),
- ingester2: Arc::clone(partition.ingester()),
- partition: partition.partition_id(),
- })
- }
- Entry::Vacant(v) => {
- v.insert(Arc::clone(partition.ingester()));
+ if !self.rpc_write() {
+ // check that partitions from ingesters don't overlap
+ let mut seen = HashMap::with_capacity(partitions.len());
+ for partition in &partitions {
+ match seen.entry(partition.partition_id()) {
+ Entry::Occupied(o) => {
+ return Err(Error::IngestersOverlap {
+ ingester1: Arc::clone(o.get()),
+ ingester2: Arc::clone(partition.ingester()),
+ partition: partition.partition_id(),
+ })
+ }
+ Entry::Vacant(v) => {
+ v.insert(Arc::clone(partition.ingester()));
+ }
}
}
}
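As a standalone illustration of the overlap check that is now skipped in RPC write mode, here is a minimal Rust sketch of the same HashMap-based pattern; `PartitionId`, `find_overlap`, and the `rpc_write` flag are simplified stand-ins rather than the actual querier types:

use std::collections::HashMap;

type PartitionId = i64;

/// Returns the first partition id reported by more than one ingester, or
/// `None` if there is no overlap. In RPC write mode overlapping partitions
/// are expected, so the check is skipped entirely.
fn find_overlap(partitions: &[(PartitionId, &str)], rpc_write: bool) -> Option<PartitionId> {
    if rpc_write {
        return None;
    }
    let mut seen: HashMap<PartitionId, &str> = HashMap::with_capacity(partitions.len());
    for &(id, ingester) in partitions {
        if seen.insert(id, ingester).is_some() {
            return Some(id);
        }
    }
    None
}

fn main() {
    let partitions = [(1, "ingester-a"), (1, "ingester-b"), (2, "ingester-a")];
    // Pre-RPC-write behaviour treats the duplicated partition 1 as an error.
    assert_eq!(find_overlap(&partitions, false), Some(1));
    // In RPC write mode the same input is accepted.
    assert_eq!(find_overlap(&partitions, true), None);
    println!("overlap handling ok");
}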
|
525c48de2c758ac406e1b5deee68982fa9ba95ac
|
Andrew Lamb
|
2023-03-01 20:52:04
|
add invariant checks to the compactor tests (#7096)
|
* feat(compactor2): adding invariant checks to the compactor tests
* fix: Update tests
* fix: remove uneeded change
* fix: filter out deleted files from invariant checks
| null |
feat(compactor2): add invariant checks to the compactor tests (#7096)
* feat(compactor2): adding invariant checks to the compactor tests
* fix: Update tests
* fix: remove unneeded change
* fix: filter out deleted files from invariant checks
|
diff --git a/compactor2/tests/layouts/core.rs b/compactor2/tests/layouts/core.rs
index 5b0dcb6c4e..4fb4967ade 100644
--- a/compactor2/tests/layouts/core.rs
+++ b/compactor2/tests/layouts/core.rs
@@ -139,7 +139,7 @@ async fn l1_with_overlapping_l0() {
.create_parquet_file(
parquet_builder()
.with_min_time(50 + i * 50)
- .with_max_time(100 + i * 50)
+ .with_max_time(99 + i * 50)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(10 * ONE_MB),
)
@@ -170,8 +170,8 @@ async fn l1_with_overlapping_l0() {
- "L0.6[230,280] 5kb |----L0.6-----| "
- "L0.7[260,310] 5kb |----L0.7-----| "
- "L1 "
- - "L1.1[50,100] 10mb |----L1.1-----| "
- - "L1.2[100,150] 10mb |----L1.2-----| "
+ - "L1.1[50,99] 10mb |----L1.1-----| "
+ - "L1.2[100,149] 10mb |----L1.2-----| "
- "**** Simulation run 0, type=compact. 6 Input Files, 10.02mb total:"
- "L0 "
- "L0.7[260,310] 5kb |------L0.7-------| "
@@ -180,7 +180,7 @@ async fn l1_with_overlapping_l0() {
- "L0.4[170,220] 5kb |------L0.4-------| "
- "L0.3[140,190] 5kb |------L0.3-------| "
- "L1 "
- - "L1.2[100,150] 10mb |------L1.2-------| "
+ - "L1.2[100,149] 10mb |------L1.2------| "
- "**** 1 Output Files (parquet_file_id not yet assigned), 10.02mb total:"
- "L1, all files 10.02mb "
- "L1.?[100,310] |-------------------------------------L1.?-------------------------------------|"
@@ -189,7 +189,7 @@ async fn l1_with_overlapping_l0() {
- " Creating 1 files at level CompactionLevel::L1"
- "**** Final Output Files "
- "L1 "
- - "L1.1[50,100] 10mb |----L1.1-----| "
+ - "L1.1[50,99] 10mb |----L1.1-----| "
- "L1.8[100,310] 10.02mb |-----------------------------L1.8-----------------------------| "
"###
);
@@ -213,7 +213,7 @@ async fn l1_with_non_overlapping_l0() {
.create_parquet_file(
parquet_builder()
.with_min_time(50 + i * 50)
- .with_max_time(100 + i * 50)
+ .with_max_time(99 + i * 50)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(10 * ONE_MB),
)
@@ -243,8 +243,8 @@ async fn l1_with_non_overlapping_l0() {
- "L0.6[450,500] 5kb |-L0.6-| "
- "L0.7[500,550] 5kb |-L0.7-|"
- "L1 "
- - "L1.1[50,100] 10mb |-L1.1-| "
- - "L1.2[100,150] 10mb |-L1.2-| "
+ - "L1.1[50,99] 10mb |L1.1-| "
+ - "L1.2[100,149] 10mb |L1.2-| "
- "**** Simulation run 0, type=compact. 5 Input Files, 25kb total:"
- "L0, all files 5kb "
- "L0.7[500,550] |-----L0.7-----|"
@@ -260,8 +260,8 @@ async fn l1_with_non_overlapping_l0() {
- " Creating 1 files at level CompactionLevel::L1"
- "**** Final Output Files "
- "L1 "
- - "L1.1[50,100] 10mb |-L1.1-| "
- - "L1.2[100,150] 10mb |-L1.2-| "
+ - "L1.1[50,99] 10mb |L1.1-| "
+ - "L1.2[100,149] 10mb |L1.2-| "
- "L1.8[300,550] 25kb |-----------------L1.8-----------------|"
"###
);
@@ -285,12 +285,13 @@ async fn l1_with_non_overlapping_l0_larger() {
.create_parquet_file(
parquet_builder()
.with_min_time(50 + i * 50)
- .with_max_time(100 + i * 50)
+ .with_max_time(99 + i * 50)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(sz * ONE_MB),
)
.await;
}
+ // L0 overlapping
for i in 0..3 {
setup
.partition
@@ -313,10 +314,10 @@ async fn l1_with_non_overlapping_l0_larger() {
- "L0.6[350,400] 5mb |--L0.6--| "
- "L0.7[400,450] 5mb |--L0.7--|"
- "L1 "
- - "L1.1[50,100] 20mb |--L1.1--| "
- - "L1.2[100,150] 50mb |--L1.2--| "
- - "L1.3[150,200] 20mb |--L1.3--| "
- - "L1.4[200,250] 3mb |--L1.4--| "
+ - "L1.1[50,99] 20mb |-L1.1--| "
+ - "L1.2[100,149] 50mb |-L1.2--| "
+ - "L1.3[150,199] 20mb |-L1.3--| "
+ - "L1.4[200,249] 3mb |-L1.4--| "
- "**** Simulation run 0, type=compact. 3 Input Files, 15mb total:"
- "L0, all files 5mb "
- "L0.7[400,450] |----------L0.7----------| "
@@ -330,10 +331,10 @@ async fn l1_with_non_overlapping_l0_larger() {
- " Creating 1 files at level CompactionLevel::L1"
- "**** Simulation run 1, type=split(split_times=[370]). 5 Input Files, 108mb total:"
- "L1 "
- - "L1.4[200,250] 3mb |--L1.4--| "
- - "L1.3[150,200] 20mb |--L1.3--| "
- - "L1.2[100,150] 50mb |--L1.2--| "
- - "L1.1[50,100] 20mb |--L1.1--| "
+ - "L1.4[200,249] 3mb |-L1.4--| "
+ - "L1.3[150,199] 20mb |-L1.3--| "
+ - "L1.2[100,149] 50mb |-L1.2--| "
+ - "L1.1[50,99] 20mb |-L1.1--| "
- "L1.8[300,450] 15mb |------------L1.8------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 108mb total:"
- "L2 "
@@ -370,7 +371,7 @@ async fn l1_too_much_with_non_overlapping_l0() {
.create_parquet_file(
parquet_builder()
.with_min_time(50 + i * 50)
- .with_max_time(100 + i * 50)
+ .with_max_time(99 + i * 50)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(sz * ONE_MB),
)
@@ -399,16 +400,16 @@ async fn l1_too_much_with_non_overlapping_l0() {
- "L0.12[600,650] 5mb |L0.12|"
- "L0.13[600,650] 5mb |L0.13|"
- "L1 "
- - "L1.1[50,100] 90mb |L1.1| "
- - "L1.2[100,150] 80mb |L1.2| "
- - "L1.3[150,200] 70mb |L1.3| "
- - "L1.4[200,250] 70mb |L1.4| "
- - "L1.5[250,300] 70mb |L1.5| "
- - "L1.6[300,350] 70mb |L1.6| "
- - "L1.7[350,400] 70mb |L1.7| "
- - "L1.8[400,450] 70mb |L1.8| "
- - "L1.9[450,500] 70mb |L1.9| "
- - "L1.10[500,550] 70mb |L1.10| "
+ - "L1.1[50,99] 90mb |L1.1| "
+ - "L1.2[100,149] 80mb |L1.2| "
+ - "L1.3[150,199] 70mb |L1.3| "
+ - "L1.4[200,249] 70mb |L1.4| "
+ - "L1.5[250,299] 70mb |L1.5| "
+ - "L1.6[300,349] 70mb |L1.6| "
+ - "L1.7[350,399] 70mb |L1.7| "
+ - "L1.8[400,449] 70mb |L1.8| "
+ - "L1.9[450,499] 70mb |L1.9| "
+ - "L1.10[500,549] 70mb |L1.10| "
- "**** Simulation run 0, type=compact. 3 Input Files, 15mb total:"
- "L0, all files 5mb "
- "L0.13[600,650] |------------------------------------L0.13-------------------------------------|"
@@ -423,16 +424,16 @@ async fn l1_too_much_with_non_overlapping_l0() {
- "SKIPPED COMPACTION for PartitionId(1): partition 1 has 781189120 parquet file bytes, limit is 268435456"
- "**** Final Output Files "
- "L1 "
- - "L1.1[50,100] 90mb |L1.1| "
- - "L1.2[100,150] 80mb |L1.2| "
- - "L1.3[150,200] 70mb |L1.3| "
- - "L1.4[200,250] 70mb |L1.4| "
- - "L1.5[250,300] 70mb |L1.5| "
- - "L1.6[300,350] 70mb |L1.6| "
- - "L1.7[350,400] 70mb |L1.7| "
- - "L1.8[400,450] 70mb |L1.8| "
- - "L1.9[450,500] 70mb |L1.9| "
- - "L1.10[500,550] 70mb |L1.10| "
+ - "L1.1[50,99] 90mb |L1.1| "
+ - "L1.2[100,149] 80mb |L1.2| "
+ - "L1.3[150,199] 70mb |L1.3| "
+ - "L1.4[200,249] 70mb |L1.4| "
+ - "L1.5[250,299] 70mb |L1.5| "
+ - "L1.6[300,349] 70mb |L1.6| "
+ - "L1.7[350,399] 70mb |L1.7| "
+ - "L1.8[400,449] 70mb |L1.8| "
+ - "L1.9[450,499] 70mb |L1.9| "
+ - "L1.10[500,549] 70mb |L1.10| "
- "L1.14[600,650] 15mb |L1.14|"
"###
);
@@ -457,7 +458,7 @@ async fn many_l1_with_non_overlapping_l0() {
.create_parquet_file(
parquet_builder()
.with_min_time(50 + i * 50)
- .with_max_time(100 + i * 50)
+ .with_max_time(99 + i * 50)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(sz * ONE_MB),
)
@@ -486,16 +487,16 @@ async fn many_l1_with_non_overlapping_l0() {
- "L0.12[600,650] 5mb |L0.12|"
- "L0.13[600,650] 5mb |L0.13|"
- "L1 "
- - "L1.1[50,100] 9mb |L1.1| "
- - "L1.2[100,150] 8mb |L1.2| "
- - "L1.3[150,200] 7mb |L1.3| "
- - "L1.4[200,250] 7mb |L1.4| "
- - "L1.5[250,300] 7mb |L1.5| "
- - "L1.6[300,350] 7mb |L1.6| "
- - "L1.7[350,400] 7mb |L1.7| "
- - "L1.8[400,450] 7mb |L1.8| "
- - "L1.9[450,500] 7mb |L1.9| "
- - "L1.10[500,550] 7mb |L1.10| "
+ - "L1.1[50,99] 9mb |L1.1| "
+ - "L1.2[100,149] 8mb |L1.2| "
+ - "L1.3[150,199] 7mb |L1.3| "
+ - "L1.4[200,249] 7mb |L1.4| "
+ - "L1.5[250,299] 7mb |L1.5| "
+ - "L1.6[300,349] 7mb |L1.6| "
+ - "L1.7[350,399] 7mb |L1.7| "
+ - "L1.8[400,449] 7mb |L1.8| "
+ - "L1.9[450,499] 7mb |L1.9| "
+ - "L1.10[500,549] 7mb |L1.10| "
- "**** Simulation run 0, type=compact. 3 Input Files, 15mb total:"
- "L0, all files 5mb "
- "L0.13[600,650] |------------------------------------L0.13-------------------------------------|"
@@ -509,16 +510,16 @@ async fn many_l1_with_non_overlapping_l0() {
- " Creating 1 files at level CompactionLevel::L1"
- "**** Simulation run 1, type=split(split_times=[530]). 11 Input Files, 88mb total:"
- "L1 "
- - "L1.10[500,550] 7mb |L1.10| "
- - "L1.9[450,500] 7mb |L1.9| "
- - "L1.8[400,450] 7mb |L1.8| "
- - "L1.7[350,400] 7mb |L1.7| "
- - "L1.6[300,350] 7mb |L1.6| "
- - "L1.5[250,300] 7mb |L1.5| "
- - "L1.4[200,250] 7mb |L1.4| "
- - "L1.3[150,200] 7mb |L1.3| "
- - "L1.2[100,150] 8mb |L1.2| "
- - "L1.1[50,100] 9mb |L1.1| "
+ - "L1.10[500,549] 7mb |L1.10| "
+ - "L1.9[450,499] 7mb |L1.9| "
+ - "L1.8[400,449] 7mb |L1.8| "
+ - "L1.7[350,399] 7mb |L1.7| "
+ - "L1.6[300,349] 7mb |L1.6| "
+ - "L1.5[250,299] 7mb |L1.5| "
+ - "L1.4[200,249] 7mb |L1.4| "
+ - "L1.3[150,199] 7mb |L1.3| "
+ - "L1.2[100,149] 8mb |L1.2| "
+ - "L1.1[50,99] 9mb |L1.1| "
- "L1.14[600,650] 15mb |L1.14|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 88mb total:"
- "L2 "
@@ -554,7 +555,7 @@ async fn large_l1_with_non_overlapping_l0() {
.create_parquet_file(
parquet_builder()
.with_min_time(50 + i * 50)
- .with_max_time(100 + i * 50)
+ .with_max_time(99 + i * 50)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(sz * ONE_MB),
)
@@ -583,8 +584,8 @@ async fn large_l1_with_non_overlapping_l0() {
- "L0.4[600,650] 5mb |L0.4| "
- "L0.5[600,650] 5mb |L0.5| "
- "L1 "
- - "L1.1[50,100] 90mb |L1.1| "
- - "L1.2[100,150] 80mb |L1.2| "
+ - "L1.1[50,99] 90mb |L1.1| "
+ - "L1.2[100,149] 80mb |L1.2| "
- "**** Simulation run 0, type=compact. 3 Input Files, 15mb total:"
- "L0, all files 5mb "
- "L0.5[600,650] |-------------------------------------L0.5-------------------------------------|"
@@ -598,8 +599,8 @@ async fn large_l1_with_non_overlapping_l0() {
- " Creating 1 files at level CompactionLevel::L1"
- "**** Simulation run 1, type=split(split_times=[375]). 3 Input Files, 185mb total:"
- "L1 "
- - "L1.2[100,150] 80mb |L1.2| "
- - "L1.1[50,100] 90mb |L1.1| "
+ - "L1.2[100,149] 80mb |L1.2| "
+ - "L1.1[50,99] 90mb |L1.1| "
- "L1.6[600,650] 15mb |L1.6| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 185mb total:"
- "L2 "
diff --git a/compactor2/tests/layouts/many_files.rs b/compactor2/tests/layouts/many_files.rs
index 1970297f40..7d10816b72 100644
--- a/compactor2/tests/layouts/many_files.rs
+++ b/compactor2/tests/layouts/many_files.rs
@@ -18,8 +18,8 @@ async fn many_l1_files() {
.partition
.create_parquet_file(
parquet_builder()
- .with_min_time(i)
- .with_max_time(i + 1)
+ .with_min_time(i * 2)
+ .with_max_time(i * 2 + 1)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(10 * ONE_MB),
)
@@ -44,75 +44,76 @@ async fn many_l1_files() {
---
- "**** Input Files "
- "L0 "
- - "L0.21[24,25] 1mb |L0.21|"
- - "L0.22[24,25] 1mb |L0.22|"
- - "L0.23[24,25] 1mb |L0.23|"
+ - "L0.21[24,25] 1mb |L0.21| "
+ - "L0.22[24,25] 1mb |L0.22| "
+ - "L0.23[24,25] 1mb |L0.23| "
- "L1 "
- "L1.1[0,1] 10mb |L1.1| "
- - "L1.2[1,2] 10mb |L1.2| "
- - "L1.3[2,3] 10mb |L1.3| "
- - "L1.4[3,4] 10mb |L1.4| "
- - "L1.5[4,5] 10mb |L1.5| "
- - "L1.6[5,6] 10mb |L1.6| "
- - "L1.7[6,7] 10mb |L1.7| "
- - "L1.8[7,8] 10mb |L1.8| "
- - "L1.9[8,9] 10mb |L1.9| "
- - "L1.10[9,10] 10mb |L1.10| "
- - "L1.11[10,11] 10mb |L1.11| "
- - "L1.12[11,12] 10mb |L1.12| "
- - "L1.13[12,13] 10mb |L1.13| "
- - "L1.14[13,14] 10mb |L1.14| "
- - "L1.15[14,15] 10mb |L1.15| "
- - "L1.16[15,16] 10mb |L1.16| "
- - "L1.17[16,17] 10mb |L1.17| "
- - "L1.18[17,18] 10mb |L1.18| "
- - "L1.19[18,19] 10mb |L1.19| "
- - "L1.20[19,20] 10mb |L1.20| "
- - "**** Simulation run 0, type=compact. 3 Input Files, 3mb total:"
- - "L0, all files 1mb "
- - "L0.23[24,25] |------------------------------------L0.23-------------------------------------|"
- - "L0.22[24,25] |------------------------------------L0.22-------------------------------------|"
- - "L0.21[24,25] |------------------------------------L0.21-------------------------------------|"
- - "**** 1 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L1, all files 3mb "
+ - "L1.2[2,3] 10mb |L1.2| "
+ - "L1.3[4,5] 10mb |L1.3| "
+ - "L1.4[6,7] 10mb |L1.4| "
+ - "L1.5[8,9] 10mb |L1.5| "
+ - "L1.6[10,11] 10mb |L1.6| "
+ - "L1.7[12,13] 10mb |L1.7| "
+ - "L1.8[14,15] 10mb |L1.8| "
+ - "L1.9[16,17] 10mb |L1.9| "
+ - "L1.10[18,19] 10mb |L1.10| "
+ - "L1.11[20,21] 10mb |L1.11| "
+ - "L1.12[22,23] 10mb |L1.12| "
+ - "L1.13[24,25] 10mb |L1.13| "
+ - "L1.14[26,27] 10mb |L1.14| "
+ - "L1.15[28,29] 10mb |L1.15| "
+ - "L1.16[30,31] 10mb |L1.16| "
+ - "L1.17[32,33] 10mb |L1.17| "
+ - "L1.18[34,35] 10mb |L1.18| "
+ - "L1.19[36,37] 10mb |L1.19|"
+ - "L1.20[38,39] 10mb |L1.20|"
+ - "**** Simulation run 0, type=compact. 4 Input Files, 13mb total:"
+ - "L0 "
+ - "L0.23[24,25] 1mb |------------------------------------L0.23-------------------------------------|"
+ - "L0.22[24,25] 1mb |------------------------------------L0.22-------------------------------------|"
+ - "L0.21[24,25] 1mb |------------------------------------L0.21-------------------------------------|"
+ - "L1 "
+ - "L1.13[24,25] 10mb |------------------------------------L1.13-------------------------------------|"
+ - "**** 1 Output Files (parquet_file_id not yet assigned), 13mb total:"
+ - "L1, all files 13mb "
- "L1.?[24,25] |-------------------------------------L1.?-------------------------------------|"
- "Committing partition 1:"
- - " Soft Deleting 3 files: L0.21, L0.22, L0.23"
+ - " Soft Deleting 4 files: L1.13, L0.21, L0.22, L0.23"
- " Creating 1 files at level CompactionLevel::L1"
- - "**** Simulation run 1, type=split(split_times=[13]). 21 Input Files, 203mb total:"
+ - "**** Simulation run 1, type=split(split_times=[20]). 20 Input Files, 203mb total:"
- "L1 "
- - "L1.20[19,20] 10mb |L1.20| "
- - "L1.19[18,19] 10mb |L1.19| "
- - "L1.18[17,18] 10mb |L1.18| "
- - "L1.17[16,17] 10mb |L1.17| "
- - "L1.16[15,16] 10mb |L1.16| "
- - "L1.15[14,15] 10mb |L1.15| "
- - "L1.14[13,14] 10mb |L1.14| "
- - "L1.13[12,13] 10mb |L1.13| "
- - "L1.12[11,12] 10mb |L1.12| "
- - "L1.11[10,11] 10mb |L1.11| "
- - "L1.10[9,10] 10mb |L1.10| "
- - "L1.9[8,9] 10mb |L1.9| "
- - "L1.8[7,8] 10mb |L1.8| "
- - "L1.7[6,7] 10mb |L1.7| "
- - "L1.6[5,6] 10mb |L1.6| "
- - "L1.5[4,5] 10mb |L1.5| "
- - "L1.4[3,4] 10mb |L1.4| "
- - "L1.3[2,3] 10mb |L1.3| "
- - "L1.2[1,2] 10mb |L1.2| "
+ - "L1.14[26,27] 10mb |L1.14| "
+ - "L1.15[28,29] 10mb |L1.15| "
+ - "L1.16[30,31] 10mb |L1.16| "
+ - "L1.17[32,33] 10mb |L1.17| "
+ - "L1.18[34,35] 10mb |L1.18| "
+ - "L1.19[36,37] 10mb |L1.19|"
+ - "L1.20[38,39] 10mb |L1.20|"
+ - "L1.12[22,23] 10mb |L1.12| "
+ - "L1.11[20,21] 10mb |L1.11| "
+ - "L1.10[18,19] 10mb |L1.10| "
+ - "L1.9[16,17] 10mb |L1.9| "
+ - "L1.8[14,15] 10mb |L1.8| "
+ - "L1.7[12,13] 10mb |L1.7| "
+ - "L1.6[10,11] 10mb |L1.6| "
+ - "L1.5[8,9] 10mb |L1.5| "
+ - "L1.4[6,7] 10mb |L1.4| "
+ - "L1.3[4,5] 10mb |L1.3| "
+ - "L1.2[2,3] 10mb |L1.2| "
- "L1.1[0,1] 10mb |L1.1| "
- - "L1.24[24,25] 3mb |L1.24|"
+ - "L1.24[24,25] 13mb |L1.24| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 203mb total:"
- "L2 "
- - "L2.?[0,13] 105.56mb |-----------------L2.?------------------| "
- - "L2.?[13,25] 97.44mb |----------------L2.?----------------| "
+ - "L2.?[0,20] 104.1mb |-----------------L2.?------------------| "
+ - "L2.?[20,39] 98.9mb |----------------L2.?----------------| "
- "Committing partition 1:"
- - " Soft Deleting 21 files: L1.1, L1.2, L1.3, L1.4, L1.5, L1.6, L1.7, L1.8, L1.9, L1.10, L1.11, L1.12, L1.13, L1.14, L1.15, L1.16, L1.17, L1.18, L1.19, L1.20, L1.24"
+ - " Soft Deleting 20 files: L1.1, L1.2, L1.3, L1.4, L1.5, L1.6, L1.7, L1.8, L1.9, L1.10, L1.11, L1.12, L1.14, L1.15, L1.16, L1.17, L1.18, L1.19, L1.20, L1.24"
- " Creating 2 files at level CompactionLevel::L2"
- "**** Final Output Files "
- "L2 "
- - "L2.25[0,13] 105.56mb|-----------------L2.25-----------------| "
- - "L2.26[13,25] 97.44mb |---------------L2.26----------------| "
+ - "L2.25[0,20] 104.1mb |-----------------L2.25-----------------| "
+ - "L2.26[20,39] 98.9mb |---------------L2.26----------------| "
"###
);
}
@@ -1677,8 +1678,8 @@ async fn many_tiny_l1_files() {
.partition
.create_parquet_file(
parquet_builder()
- .with_min_time(i)
- .with_max_time(i + 1)
+ .with_min_time(2 * i)
+ .with_max_time(2 * i + 1)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
.with_file_size_bytes(seven_kb),
)
@@ -1692,601 +1693,601 @@ async fn many_tiny_l1_files() {
- "**** Input Files "
- "L1, all files 7kb "
- "L1.1[0,1] |L1.1| "
- - "L1.2[1,2] |L1.2| "
- - "L1.3[2,3] |L1.3| "
- - "L1.4[3,4] |L1.4| "
- - "L1.5[4,5] |L1.5| "
- - "L1.6[5,6] |L1.6| "
- - "L1.7[6,7] |L1.7| "
- - "L1.8[7,8] |L1.8| "
- - "L1.9[8,9] |L1.9| "
- - "L1.10[9,10] |L1.10| "
- - "L1.11[10,11] |L1.11| "
- - "L1.12[11,12] |L1.12| "
- - "L1.13[12,13] |L1.13| "
- - "L1.14[13,14] |L1.14| "
- - "L1.15[14,15] |L1.15| "
- - "L1.16[15,16] |L1.16| "
- - "L1.17[16,17] |L1.17| "
- - "L1.18[17,18] |L1.18| "
- - "L1.19[18,19] |L1.19| "
- - "L1.20[19,20] |L1.20| "
- - "L1.21[20,21] |L1.21| "
- - "L1.22[21,22] |L1.22| "
- - "L1.23[22,23] |L1.23| "
- - "L1.24[23,24] |L1.24| "
- - "L1.25[24,25] |L1.25| "
- - "L1.26[25,26] |L1.26| "
- - "L1.27[26,27] |L1.27| "
- - "L1.28[27,28] |L1.28| "
- - "L1.29[28,29] |L1.29| "
- - "L1.30[29,30] |L1.30| "
- - "L1.31[30,31] |L1.31| "
- - "L1.32[31,32] |L1.32| "
- - "L1.33[32,33] |L1.33| "
- - "L1.34[33,34] |L1.34| "
- - "L1.35[34,35] |L1.35| "
- - "L1.36[35,36] |L1.36| "
- - "L1.37[36,37] |L1.37| "
- - "L1.38[37,38] |L1.38| "
- - "L1.39[38,39] |L1.39| "
- - "L1.40[39,40] |L1.40| "
- - "L1.41[40,41] |L1.41| "
- - "L1.42[41,42] |L1.42| "
- - "L1.43[42,43] |L1.43| "
- - "L1.44[43,44] |L1.44| "
- - "L1.45[44,45] |L1.45| "
- - "L1.46[45,46] |L1.46| "
- - "L1.47[46,47] |L1.47| "
- - "L1.48[47,48] |L1.48| "
- - "L1.49[48,49] |L1.49| "
- - "L1.50[49,50] |L1.50| "
- - "L1.51[50,51] |L1.51| "
- - "L1.52[51,52] |L1.52| "
- - "L1.53[52,53] |L1.53| "
- - "L1.54[53,54] |L1.54| "
- - "L1.55[54,55] |L1.55| "
- - "L1.56[55,56] |L1.56| "
- - "L1.57[56,57] |L1.57| "
- - "L1.58[57,58] |L1.58| "
- - "L1.59[58,59] |L1.59| "
- - "L1.60[59,60] |L1.60| "
- - "L1.61[60,61] |L1.61| "
- - "L1.62[61,62] |L1.62| "
- - "L1.63[62,63] |L1.63| "
- - "L1.64[63,64] |L1.64| "
- - "L1.65[64,65] |L1.65| "
- - "L1.66[65,66] |L1.66| "
- - "L1.67[66,67] |L1.67| "
- - "L1.68[67,68] |L1.68| "
- - "L1.69[68,69] |L1.69| "
- - "L1.70[69,70] |L1.70| "
- - "L1.71[70,71] |L1.71| "
- - "L1.72[71,72] |L1.72| "
- - "L1.73[72,73] |L1.73| "
- - "L1.74[73,74] |L1.74| "
- - "L1.75[74,75] |L1.75| "
- - "L1.76[75,76] |L1.76| "
- - "L1.77[76,77] |L1.77| "
- - "L1.78[77,78] |L1.78| "
- - "L1.79[78,79] |L1.79| "
- - "L1.80[79,80] |L1.80| "
- - "L1.81[80,81] |L1.81| "
- - "L1.82[81,82] |L1.82| "
- - "L1.83[82,83] |L1.83| "
- - "L1.84[83,84] |L1.84| "
- - "L1.85[84,85] |L1.85| "
- - "L1.86[85,86] |L1.86| "
- - "L1.87[86,87] |L1.87| "
- - "L1.88[87,88] |L1.88| "
- - "L1.89[88,89] |L1.89| "
- - "L1.90[89,90] |L1.90| "
- - "L1.91[90,91] |L1.91| "
- - "L1.92[91,92] |L1.92| "
- - "L1.93[92,93] |L1.93| "
- - "L1.94[93,94] |L1.94| "
- - "L1.95[94,95] |L1.95| "
- - "L1.96[95,96] |L1.96| "
- - "L1.97[96,97] |L1.97| "
- - "L1.98[97,98] |L1.98| "
- - "L1.99[98,99] |L1.99| "
- - "L1.100[99,100] |L1.100| "
- - "L1.101[100,101] |L1.101| "
- - "L1.102[101,102] |L1.102| "
- - "L1.103[102,103] |L1.103| "
- - "L1.104[103,104] |L1.104| "
- - "L1.105[104,105] |L1.105| "
- - "L1.106[105,106] |L1.106| "
- - "L1.107[106,107] |L1.107| "
- - "L1.108[107,108] |L1.108| "
- - "L1.109[108,109] |L1.109| "
- - "L1.110[109,110] |L1.110| "
- - "L1.111[110,111] |L1.111| "
- - "L1.112[111,112] |L1.112| "
- - "L1.113[112,113] |L1.113| "
- - "L1.114[113,114] |L1.114| "
- - "L1.115[114,115] |L1.115| "
- - "L1.116[115,116] |L1.116| "
- - "L1.117[116,117] |L1.117| "
- - "L1.118[117,118] |L1.118| "
- - "L1.119[118,119] |L1.119| "
- - "L1.120[119,120] |L1.120| "
- - "L1.121[120,121] |L1.121| "
- - "L1.122[121,122] |L1.122| "
- - "L1.123[122,123] |L1.123| "
- - "L1.124[123,124] |L1.124| "
- - "L1.125[124,125] |L1.125| "
- - "L1.126[125,126] |L1.126| "
- - "L1.127[126,127] |L1.127| "
- - "L1.128[127,128] |L1.128| "
- - "L1.129[128,129] |L1.129| "
- - "L1.130[129,130] |L1.130| "
- - "L1.131[130,131] |L1.131| "
- - "L1.132[131,132] |L1.132| "
- - "L1.133[132,133] |L1.133| "
- - "L1.134[133,134] |L1.134| "
- - "L1.135[134,135] |L1.135| "
- - "L1.136[135,136] |L1.136| "
- - "L1.137[136,137] |L1.137| "
- - "L1.138[137,138] |L1.138| "
- - "L1.139[138,139] |L1.139| "
- - "L1.140[139,140] |L1.140| "
- - "L1.141[140,141] |L1.141| "
- - "L1.142[141,142] |L1.142| "
- - "L1.143[142,143] |L1.143| "
- - "L1.144[143,144] |L1.144| "
- - "L1.145[144,145] |L1.145| "
- - "L1.146[145,146] |L1.146| "
- - "L1.147[146,147] |L1.147| "
- - "L1.148[147,148] |L1.148| "
- - "L1.149[148,149] |L1.149| "
- - "L1.150[149,150] |L1.150| "
- - "L1.151[150,151] |L1.151| "
- - "L1.152[151,152] |L1.152| "
- - "L1.153[152,153] |L1.153| "
- - "L1.154[153,154] |L1.154| "
- - "L1.155[154,155] |L1.155| "
- - "L1.156[155,156] |L1.156| "
- - "L1.157[156,157] |L1.157| "
- - "L1.158[157,158] |L1.158| "
- - "L1.159[158,159] |L1.159| "
- - "L1.160[159,160] |L1.160| "
- - "L1.161[160,161] |L1.161| "
- - "L1.162[161,162] |L1.162| "
- - "L1.163[162,163] |L1.163| "
- - "L1.164[163,164] |L1.164| "
- - "L1.165[164,165] |L1.165| "
- - "L1.166[165,166] |L1.166| "
- - "L1.167[166,167] |L1.167| "
- - "L1.168[167,168] |L1.168| "
- - "L1.169[168,169] |L1.169| "
- - "L1.170[169,170] |L1.170| "
- - "L1.171[170,171] |L1.171| "
- - "L1.172[171,172] |L1.172| "
- - "L1.173[172,173] |L1.173| "
- - "L1.174[173,174] |L1.174| "
- - "L1.175[174,175] |L1.175| "
- - "L1.176[175,176] |L1.176| "
- - "L1.177[176,177] |L1.177| "
- - "L1.178[177,178] |L1.178| "
- - "L1.179[178,179] |L1.179| "
- - "L1.180[179,180] |L1.180| "
- - "L1.181[180,181] |L1.181| "
- - "L1.182[181,182] |L1.182| "
- - "L1.183[182,183] |L1.183| "
- - "L1.184[183,184] |L1.184| "
- - "L1.185[184,185] |L1.185| "
- - "L1.186[185,186] |L1.186| "
- - "L1.187[186,187] |L1.187| "
- - "L1.188[187,188] |L1.188| "
- - "L1.189[188,189] |L1.189| "
- - "L1.190[189,190] |L1.190| "
- - "L1.191[190,191] |L1.191| "
- - "L1.192[191,192] |L1.192| "
- - "L1.193[192,193] |L1.193| "
- - "L1.194[193,194] |L1.194| "
- - "L1.195[194,195] |L1.195| "
- - "L1.196[195,196] |L1.196| "
- - "L1.197[196,197] |L1.197| "
- - "L1.198[197,198] |L1.198| "
- - "L1.199[198,199] |L1.199| "
- - "L1.200[199,200] |L1.200| "
- - "L1.201[200,201] |L1.201| "
- - "L1.202[201,202] |L1.202| "
- - "L1.203[202,203] |L1.203| "
- - "L1.204[203,204] |L1.204| "
- - "L1.205[204,205] |L1.205| "
- - "L1.206[205,206] |L1.206| "
- - "L1.207[206,207] |L1.207| "
- - "L1.208[207,208] |L1.208| "
- - "L1.209[208,209] |L1.209| "
- - "L1.210[209,210] |L1.210| "
- - "L1.211[210,211] |L1.211| "
- - "L1.212[211,212] |L1.212| "
- - "L1.213[212,213] |L1.213| "
- - "L1.214[213,214] |L1.214| "
- - "L1.215[214,215] |L1.215| "
- - "L1.216[215,216] |L1.216| "
- - "L1.217[216,217] |L1.217| "
- - "L1.218[217,218] |L1.218| "
- - "L1.219[218,219] |L1.219| "
- - "L1.220[219,220] |L1.220| "
- - "L1.221[220,221] |L1.221| "
- - "L1.222[221,222] |L1.222| "
- - "L1.223[222,223] |L1.223| "
- - "L1.224[223,224] |L1.224| "
- - "L1.225[224,225] |L1.225| "
- - "L1.226[225,226] |L1.226| "
- - "L1.227[226,227] |L1.227| "
- - "L1.228[227,228] |L1.228| "
- - "L1.229[228,229] |L1.229| "
- - "L1.230[229,230] |L1.230| "
- - "L1.231[230,231] |L1.231| "
- - "L1.232[231,232] |L1.232| "
- - "L1.233[232,233] |L1.233| "
- - "L1.234[233,234] |L1.234| "
- - "L1.235[234,235] |L1.235| "
- - "L1.236[235,236] |L1.236| "
- - "L1.237[236,237] |L1.237| "
- - "L1.238[237,238] |L1.238| "
- - "L1.239[238,239] |L1.239| "
- - "L1.240[239,240] |L1.240| "
- - "L1.241[240,241] |L1.241| "
- - "L1.242[241,242] |L1.242| "
- - "L1.243[242,243] |L1.243| "
- - "L1.244[243,244] |L1.244| "
- - "L1.245[244,245] |L1.245| "
- - "L1.246[245,246] |L1.246| "
- - "L1.247[246,247] |L1.247| "
- - "L1.248[247,248] |L1.248| "
- - "L1.249[248,249] |L1.249| "
- - "L1.250[249,250] |L1.250| "
- - "L1.251[250,251] |L1.251| "
- - "L1.252[251,252] |L1.252| "
- - "L1.253[252,253] |L1.253| "
- - "L1.254[253,254] |L1.254| "
- - "L1.255[254,255] |L1.255| "
- - "L1.256[255,256] |L1.256| "
- - "L1.257[256,257] |L1.257| "
- - "L1.258[257,258] |L1.258| "
- - "L1.259[258,259] |L1.259| "
- - "L1.260[259,260] |L1.260| "
- - "L1.261[260,261] |L1.261|"
- - "L1.262[261,262] |L1.262|"
- - "L1.263[262,263] |L1.263|"
- - "L1.264[263,264] |L1.264|"
- - "L1.265[264,265] |L1.265|"
- - "L1.266[265,266] |L1.266|"
- - "L1.267[266,267] |L1.267|"
- - "L1.268[267,268] |L1.268|"
- - "L1.269[268,269] |L1.269|"
- - "L1.270[269,270] |L1.270|"
- - "L1.271[270,271] |L1.271|"
- - "L1.272[271,272] |L1.272|"
- - "L1.273[272,273] |L1.273|"
- - "L1.274[273,274] |L1.274|"
- - "L1.275[274,275] |L1.275|"
- - "L1.276[275,276] |L1.276|"
- - "L1.277[276,277] |L1.277|"
- - "L1.278[277,278] |L1.278|"
- - "L1.279[278,279] |L1.279|"
- - "L1.280[279,280] |L1.280|"
- - "L1.281[280,281] |L1.281|"
- - "L1.282[281,282] |L1.282|"
- - "L1.283[282,283] |L1.283|"
- - "L1.284[283,284] |L1.284|"
- - "L1.285[284,285] |L1.285|"
- - "L1.286[285,286] |L1.286|"
- - "L1.287[286,287] |L1.287|"
- - "L1.288[287,288] |L1.288|"
+ - "L1.2[2,3] |L1.2| "
+ - "L1.3[4,5] |L1.3| "
+ - "L1.4[6,7] |L1.4| "
+ - "L1.5[8,9] |L1.5| "
+ - "L1.6[10,11] |L1.6| "
+ - "L1.7[12,13] |L1.7| "
+ - "L1.8[14,15] |L1.8| "
+ - "L1.9[16,17] |L1.9| "
+ - "L1.10[18,19] |L1.10| "
+ - "L1.11[20,21] |L1.11| "
+ - "L1.12[22,23] |L1.12| "
+ - "L1.13[24,25] |L1.13| "
+ - "L1.14[26,27] |L1.14| "
+ - "L1.15[28,29] |L1.15| "
+ - "L1.16[30,31] |L1.16| "
+ - "L1.17[32,33] |L1.17| "
+ - "L1.18[34,35] |L1.18| "
+ - "L1.19[36,37] |L1.19| "
+ - "L1.20[38,39] |L1.20| "
+ - "L1.21[40,41] |L1.21| "
+ - "L1.22[42,43] |L1.22| "
+ - "L1.23[44,45] |L1.23| "
+ - "L1.24[46,47] |L1.24| "
+ - "L1.25[48,49] |L1.25| "
+ - "L1.26[50,51] |L1.26| "
+ - "L1.27[52,53] |L1.27| "
+ - "L1.28[54,55] |L1.28| "
+ - "L1.29[56,57] |L1.29| "
+ - "L1.30[58,59] |L1.30| "
+ - "L1.31[60,61] |L1.31| "
+ - "L1.32[62,63] |L1.32| "
+ - "L1.33[64,65] |L1.33| "
+ - "L1.34[66,67] |L1.34| "
+ - "L1.35[68,69] |L1.35| "
+ - "L1.36[70,71] |L1.36| "
+ - "L1.37[72,73] |L1.37| "
+ - "L1.38[74,75] |L1.38| "
+ - "L1.39[76,77] |L1.39| "
+ - "L1.40[78,79] |L1.40| "
+ - "L1.41[80,81] |L1.41| "
+ - "L1.42[82,83] |L1.42| "
+ - "L1.43[84,85] |L1.43| "
+ - "L1.44[86,87] |L1.44| "
+ - "L1.45[88,89] |L1.45| "
+ - "L1.46[90,91] |L1.46| "
+ - "L1.47[92,93] |L1.47| "
+ - "L1.48[94,95] |L1.48| "
+ - "L1.49[96,97] |L1.49| "
+ - "L1.50[98,99] |L1.50| "
+ - "L1.51[100,101] |L1.51| "
+ - "L1.52[102,103] |L1.52| "
+ - "L1.53[104,105] |L1.53| "
+ - "L1.54[106,107] |L1.54| "
+ - "L1.55[108,109] |L1.55| "
+ - "L1.56[110,111] |L1.56| "
+ - "L1.57[112,113] |L1.57| "
+ - "L1.58[114,115] |L1.58| "
+ - "L1.59[116,117] |L1.59| "
+ - "L1.60[118,119] |L1.60| "
+ - "L1.61[120,121] |L1.61| "
+ - "L1.62[122,123] |L1.62| "
+ - "L1.63[124,125] |L1.63| "
+ - "L1.64[126,127] |L1.64| "
+ - "L1.65[128,129] |L1.65| "
+ - "L1.66[130,131] |L1.66| "
+ - "L1.67[132,133] |L1.67| "
+ - "L1.68[134,135] |L1.68| "
+ - "L1.69[136,137] |L1.69| "
+ - "L1.70[138,139] |L1.70| "
+ - "L1.71[140,141] |L1.71| "
+ - "L1.72[142,143] |L1.72| "
+ - "L1.73[144,145] |L1.73| "
+ - "L1.74[146,147] |L1.74| "
+ - "L1.75[148,149] |L1.75| "
+ - "L1.76[150,151] |L1.76| "
+ - "L1.77[152,153] |L1.77| "
+ - "L1.78[154,155] |L1.78| "
+ - "L1.79[156,157] |L1.79| "
+ - "L1.80[158,159] |L1.80| "
+ - "L1.81[160,161] |L1.81| "
+ - "L1.82[162,163] |L1.82| "
+ - "L1.83[164,165] |L1.83| "
+ - "L1.84[166,167] |L1.84| "
+ - "L1.85[168,169] |L1.85| "
+ - "L1.86[170,171] |L1.86| "
+ - "L1.87[172,173] |L1.87| "
+ - "L1.88[174,175] |L1.88| "
+ - "L1.89[176,177] |L1.89| "
+ - "L1.90[178,179] |L1.90| "
+ - "L1.91[180,181] |L1.91| "
+ - "L1.92[182,183] |L1.92| "
+ - "L1.93[184,185] |L1.93| "
+ - "L1.94[186,187] |L1.94| "
+ - "L1.95[188,189] |L1.95| "
+ - "L1.96[190,191] |L1.96| "
+ - "L1.97[192,193] |L1.97| "
+ - "L1.98[194,195] |L1.98| "
+ - "L1.99[196,197] |L1.99| "
+ - "L1.100[198,199] |L1.100| "
+ - "L1.101[200,201] |L1.101| "
+ - "L1.102[202,203] |L1.102| "
+ - "L1.103[204,205] |L1.103| "
+ - "L1.104[206,207] |L1.104| "
+ - "L1.105[208,209] |L1.105| "
+ - "L1.106[210,211] |L1.106| "
+ - "L1.107[212,213] |L1.107| "
+ - "L1.108[214,215] |L1.108| "
+ - "L1.109[216,217] |L1.109| "
+ - "L1.110[218,219] |L1.110| "
+ - "L1.111[220,221] |L1.111| "
+ - "L1.112[222,223] |L1.112| "
+ - "L1.113[224,225] |L1.113| "
+ - "L1.114[226,227] |L1.114| "
+ - "L1.115[228,229] |L1.115| "
+ - "L1.116[230,231] |L1.116| "
+ - "L1.117[232,233] |L1.117| "
+ - "L1.118[234,235] |L1.118| "
+ - "L1.119[236,237] |L1.119| "
+ - "L1.120[238,239] |L1.120| "
+ - "L1.121[240,241] |L1.121| "
+ - "L1.122[242,243] |L1.122| "
+ - "L1.123[244,245] |L1.123| "
+ - "L1.124[246,247] |L1.124| "
+ - "L1.125[248,249] |L1.125| "
+ - "L1.126[250,251] |L1.126| "
+ - "L1.127[252,253] |L1.127| "
+ - "L1.128[254,255] |L1.128| "
+ - "L1.129[256,257] |L1.129| "
+ - "L1.130[258,259] |L1.130| "
+ - "L1.131[260,261] |L1.131| "
+ - "L1.132[262,263] |L1.132| "
+ - "L1.133[264,265] |L1.133| "
+ - "L1.134[266,267] |L1.134| "
+ - "L1.135[268,269] |L1.135| "
+ - "L1.136[270,271] |L1.136| "
+ - "L1.137[272,273] |L1.137| "
+ - "L1.138[274,275] |L1.138| "
+ - "L1.139[276,277] |L1.139| "
+ - "L1.140[278,279] |L1.140| "
+ - "L1.141[280,281] |L1.141| "
+ - "L1.142[282,283] |L1.142| "
+ - "L1.143[284,285] |L1.143| "
+ - "L1.144[286,287] |L1.144| "
+ - "L1.145[288,289] |L1.145| "
+ - "L1.146[290,291] |L1.146| "
+ - "L1.147[292,293] |L1.147| "
+ - "L1.148[294,295] |L1.148| "
+ - "L1.149[296,297] |L1.149| "
+ - "L1.150[298,299] |L1.150| "
+ - "L1.151[300,301] |L1.151| "
+ - "L1.152[302,303] |L1.152| "
+ - "L1.153[304,305] |L1.153| "
+ - "L1.154[306,307] |L1.154| "
+ - "L1.155[308,309] |L1.155| "
+ - "L1.156[310,311] |L1.156| "
+ - "L1.157[312,313] |L1.157| "
+ - "L1.158[314,315] |L1.158| "
+ - "L1.159[316,317] |L1.159| "
+ - "L1.160[318,319] |L1.160| "
+ - "L1.161[320,321] |L1.161| "
+ - "L1.162[322,323] |L1.162| "
+ - "L1.163[324,325] |L1.163| "
+ - "L1.164[326,327] |L1.164| "
+ - "L1.165[328,329] |L1.165| "
+ - "L1.166[330,331] |L1.166| "
+ - "L1.167[332,333] |L1.167| "
+ - "L1.168[334,335] |L1.168| "
+ - "L1.169[336,337] |L1.169| "
+ - "L1.170[338,339] |L1.170| "
+ - "L1.171[340,341] |L1.171| "
+ - "L1.172[342,343] |L1.172| "
+ - "L1.173[344,345] |L1.173| "
+ - "L1.174[346,347] |L1.174| "
+ - "L1.175[348,349] |L1.175| "
+ - "L1.176[350,351] |L1.176| "
+ - "L1.177[352,353] |L1.177| "
+ - "L1.178[354,355] |L1.178| "
+ - "L1.179[356,357] |L1.179| "
+ - "L1.180[358,359] |L1.180| "
+ - "L1.181[360,361] |L1.181| "
+ - "L1.182[362,363] |L1.182| "
+ - "L1.183[364,365] |L1.183| "
+ - "L1.184[366,367] |L1.184| "
+ - "L1.185[368,369] |L1.185| "
+ - "L1.186[370,371] |L1.186| "
+ - "L1.187[372,373] |L1.187| "
+ - "L1.188[374,375] |L1.188| "
+ - "L1.189[376,377] |L1.189| "
+ - "L1.190[378,379] |L1.190| "
+ - "L1.191[380,381] |L1.191| "
+ - "L1.192[382,383] |L1.192| "
+ - "L1.193[384,385] |L1.193| "
+ - "L1.194[386,387] |L1.194| "
+ - "L1.195[388,389] |L1.195| "
+ - "L1.196[390,391] |L1.196| "
+ - "L1.197[392,393] |L1.197| "
+ - "L1.198[394,395] |L1.198| "
+ - "L1.199[396,397] |L1.199| "
+ - "L1.200[398,399] |L1.200| "
+ - "L1.201[400,401] |L1.201| "
+ - "L1.202[402,403] |L1.202| "
+ - "L1.203[404,405] |L1.203| "
+ - "L1.204[406,407] |L1.204| "
+ - "L1.205[408,409] |L1.205| "
+ - "L1.206[410,411] |L1.206| "
+ - "L1.207[412,413] |L1.207| "
+ - "L1.208[414,415] |L1.208| "
+ - "L1.209[416,417] |L1.209| "
+ - "L1.210[418,419] |L1.210| "
+ - "L1.211[420,421] |L1.211| "
+ - "L1.212[422,423] |L1.212| "
+ - "L1.213[424,425] |L1.213| "
+ - "L1.214[426,427] |L1.214| "
+ - "L1.215[428,429] |L1.215| "
+ - "L1.216[430,431] |L1.216| "
+ - "L1.217[432,433] |L1.217| "
+ - "L1.218[434,435] |L1.218| "
+ - "L1.219[436,437] |L1.219| "
+ - "L1.220[438,439] |L1.220| "
+ - "L1.221[440,441] |L1.221| "
+ - "L1.222[442,443] |L1.222| "
+ - "L1.223[444,445] |L1.223| "
+ - "L1.224[446,447] |L1.224| "
+ - "L1.225[448,449] |L1.225| "
+ - "L1.226[450,451] |L1.226| "
+ - "L1.227[452,453] |L1.227| "
+ - "L1.228[454,455] |L1.228| "
+ - "L1.229[456,457] |L1.229| "
+ - "L1.230[458,459] |L1.230| "
+ - "L1.231[460,461] |L1.231| "
+ - "L1.232[462,463] |L1.232| "
+ - "L1.233[464,465] |L1.233| "
+ - "L1.234[466,467] |L1.234| "
+ - "L1.235[468,469] |L1.235| "
+ - "L1.236[470,471] |L1.236| "
+ - "L1.237[472,473] |L1.237| "
+ - "L1.238[474,475] |L1.238| "
+ - "L1.239[476,477] |L1.239| "
+ - "L1.240[478,479] |L1.240| "
+ - "L1.241[480,481] |L1.241| "
+ - "L1.242[482,483] |L1.242| "
+ - "L1.243[484,485] |L1.243| "
+ - "L1.244[486,487] |L1.244| "
+ - "L1.245[488,489] |L1.245| "
+ - "L1.246[490,491] |L1.246| "
+ - "L1.247[492,493] |L1.247| "
+ - "L1.248[494,495] |L1.248| "
+ - "L1.249[496,497] |L1.249| "
+ - "L1.250[498,499] |L1.250| "
+ - "L1.251[500,501] |L1.251| "
+ - "L1.252[502,503] |L1.252| "
+ - "L1.253[504,505] |L1.253| "
+ - "L1.254[506,507] |L1.254| "
+ - "L1.255[508,509] |L1.255| "
+ - "L1.256[510,511] |L1.256| "
+ - "L1.257[512,513] |L1.257| "
+ - "L1.258[514,515] |L1.258| "
+ - "L1.259[516,517] |L1.259| "
+ - "L1.260[518,519] |L1.260|"
+ - "L1.261[520,521] |L1.261|"
+ - "L1.262[522,523] |L1.262|"
+ - "L1.263[524,525] |L1.263|"
+ - "L1.264[526,527] |L1.264|"
+ - "L1.265[528,529] |L1.265|"
+ - "L1.266[530,531] |L1.266|"
+ - "L1.267[532,533] |L1.267|"
+ - "L1.268[534,535] |L1.268|"
+ - "L1.269[536,537] |L1.269|"
+ - "L1.270[538,539] |L1.270|"
+ - "L1.271[540,541] |L1.271|"
+ - "L1.272[542,543] |L1.272|"
+ - "L1.273[544,545] |L1.273|"
+ - "L1.274[546,547] |L1.274|"
+ - "L1.275[548,549] |L1.275|"
+ - "L1.276[550,551] |L1.276|"
+ - "L1.277[552,553] |L1.277|"
+ - "L1.278[554,555] |L1.278|"
+ - "L1.279[556,557] |L1.279|"
+ - "L1.280[558,559] |L1.280|"
+ - "L1.281[560,561] |L1.281|"
+ - "L1.282[562,563] |L1.282|"
+ - "L1.283[564,565] |L1.283|"
+ - "L1.284[566,567] |L1.284|"
+ - "L1.285[568,569] |L1.285|"
+ - "L1.286[570,571] |L1.286|"
+ - "L1.287[572,573] |L1.287|"
+ - "L1.288[574,575] |L1.288|"
- "**** Simulation run 0, type=compact. 200 Input Files, 1.37mb total:"
- "L1, all files 7kb "
- "L1.1[0,1] |L1.1| "
- - "L1.2[1,2] |L1.2| "
- - "L1.3[2,3] |L1.3| "
- - "L1.4[3,4] |L1.4| "
- - "L1.5[4,5] |L1.5| "
- - "L1.6[5,6] |L1.6| "
- - "L1.7[6,7] |L1.7| "
- - "L1.8[7,8] |L1.8| "
- - "L1.9[8,9] |L1.9| "
- - "L1.10[9,10] |L1.10| "
- - "L1.11[10,11] |L1.11| "
- - "L1.12[11,12] |L1.12| "
- - "L1.13[12,13] |L1.13| "
- - "L1.14[13,14] |L1.14| "
- - "L1.15[14,15] |L1.15| "
- - "L1.16[15,16] |L1.16| "
- - "L1.17[16,17] |L1.17| "
- - "L1.18[17,18] |L1.18| "
- - "L1.19[18,19] |L1.19| "
- - "L1.20[19,20] |L1.20| "
- - "L1.21[20,21] |L1.21| "
- - "L1.22[21,22] |L1.22| "
- - "L1.23[22,23] |L1.23| "
- - "L1.24[23,24] |L1.24| "
- - "L1.25[24,25] |L1.25| "
- - "L1.26[25,26] |L1.26| "
- - "L1.27[26,27] |L1.27| "
- - "L1.28[27,28] |L1.28| "
- - "L1.29[28,29] |L1.29| "
- - "L1.30[29,30] |L1.30| "
- - "L1.31[30,31] |L1.31| "
- - "L1.32[31,32] |L1.32| "
- - "L1.33[32,33] |L1.33| "
- - "L1.34[33,34] |L1.34| "
- - "L1.35[34,35] |L1.35| "
- - "L1.36[35,36] |L1.36| "
- - "L1.37[36,37] |L1.37| "
- - "L1.38[37,38] |L1.38| "
- - "L1.39[38,39] |L1.39| "
- - "L1.40[39,40] |L1.40| "
- - "L1.41[40,41] |L1.41| "
- - "L1.42[41,42] |L1.42| "
- - "L1.43[42,43] |L1.43| "
- - "L1.44[43,44] |L1.44| "
- - "L1.45[44,45] |L1.45| "
- - "L1.46[45,46] |L1.46| "
- - "L1.47[46,47] |L1.47| "
- - "L1.48[47,48] |L1.48| "
- - "L1.49[48,49] |L1.49| "
- - "L1.50[49,50] |L1.50| "
- - "L1.51[50,51] |L1.51| "
- - "L1.52[51,52] |L1.52| "
- - "L1.53[52,53] |L1.53| "
- - "L1.54[53,54] |L1.54| "
- - "L1.55[54,55] |L1.55| "
- - "L1.56[55,56] |L1.56| "
- - "L1.57[56,57] |L1.57| "
- - "L1.58[57,58] |L1.58| "
- - "L1.59[58,59] |L1.59| "
- - "L1.60[59,60] |L1.60| "
- - "L1.61[60,61] |L1.61| "
- - "L1.62[61,62] |L1.62| "
- - "L1.63[62,63] |L1.63| "
- - "L1.64[63,64] |L1.64| "
- - "L1.65[64,65] |L1.65| "
- - "L1.66[65,66] |L1.66| "
- - "L1.67[66,67] |L1.67| "
- - "L1.68[67,68] |L1.68| "
- - "L1.69[68,69] |L1.69| "
- - "L1.70[69,70] |L1.70| "
- - "L1.71[70,71] |L1.71| "
- - "L1.72[71,72] |L1.72| "
- - "L1.73[72,73] |L1.73| "
- - "L1.74[73,74] |L1.74| "
- - "L1.75[74,75] |L1.75| "
- - "L1.76[75,76] |L1.76| "
- - "L1.77[76,77] |L1.77| "
- - "L1.78[77,78] |L1.78| "
- - "L1.79[78,79] |L1.79| "
- - "L1.80[79,80] |L1.80| "
- - "L1.81[80,81] |L1.81| "
- - "L1.82[81,82] |L1.82| "
- - "L1.83[82,83] |L1.83| "
- - "L1.84[83,84] |L1.84| "
- - "L1.85[84,85] |L1.85| "
- - "L1.86[85,86] |L1.86| "
- - "L1.87[86,87] |L1.87| "
- - "L1.88[87,88] |L1.88| "
- - "L1.89[88,89] |L1.89| "
- - "L1.90[89,90] |L1.90| "
- - "L1.91[90,91] |L1.91| "
- - "L1.92[91,92] |L1.92| "
- - "L1.93[92,93] |L1.93| "
- - "L1.94[93,94] |L1.94| "
- - "L1.95[94,95] |L1.95| "
- - "L1.96[95,96] |L1.96| "
- - "L1.97[96,97] |L1.97| "
- - "L1.98[97,98] |L1.98| "
- - "L1.99[98,99] |L1.99| "
- - "L1.100[99,100] |L1.100| "
- - "L1.101[100,101] |L1.101| "
- - "L1.102[101,102] |L1.102| "
- - "L1.103[102,103] |L1.103| "
- - "L1.104[103,104] |L1.104| "
- - "L1.105[104,105] |L1.105| "
- - "L1.106[105,106] |L1.106| "
- - "L1.107[106,107] |L1.107| "
- - "L1.108[107,108] |L1.108| "
- - "L1.109[108,109] |L1.109| "
- - "L1.110[109,110] |L1.110| "
- - "L1.111[110,111] |L1.111| "
- - "L1.112[111,112] |L1.112| "
- - "L1.113[112,113] |L1.113| "
- - "L1.114[113,114] |L1.114| "
- - "L1.115[114,115] |L1.115| "
- - "L1.116[115,116] |L1.116| "
- - "L1.117[116,117] |L1.117| "
- - "L1.118[117,118] |L1.118| "
- - "L1.119[118,119] |L1.119| "
- - "L1.120[119,120] |L1.120| "
- - "L1.121[120,121] |L1.121| "
- - "L1.122[121,122] |L1.122| "
- - "L1.123[122,123] |L1.123| "
- - "L1.124[123,124] |L1.124| "
- - "L1.125[124,125] |L1.125| "
- - "L1.126[125,126] |L1.126| "
- - "L1.127[126,127] |L1.127| "
- - "L1.128[127,128] |L1.128| "
- - "L1.129[128,129] |L1.129| "
- - "L1.130[129,130] |L1.130| "
- - "L1.131[130,131] |L1.131| "
- - "L1.132[131,132] |L1.132| "
- - "L1.133[132,133] |L1.133| "
- - "L1.134[133,134] |L1.134| "
- - "L1.135[134,135] |L1.135| "
- - "L1.136[135,136] |L1.136| "
- - "L1.137[136,137] |L1.137| "
- - "L1.138[137,138] |L1.138| "
- - "L1.139[138,139] |L1.139| "
- - "L1.140[139,140] |L1.140| "
- - "L1.141[140,141] |L1.141| "
- - "L1.142[141,142] |L1.142| "
- - "L1.143[142,143] |L1.143| "
- - "L1.144[143,144] |L1.144| "
- - "L1.145[144,145] |L1.145| "
- - "L1.146[145,146] |L1.146| "
- - "L1.147[146,147] |L1.147| "
- - "L1.148[147,148] |L1.148| "
- - "L1.149[148,149] |L1.149| "
- - "L1.150[149,150] |L1.150| "
- - "L1.151[150,151] |L1.151| "
- - "L1.152[151,152] |L1.152| "
- - "L1.153[152,153] |L1.153| "
- - "L1.154[153,154] |L1.154| "
- - "L1.155[154,155] |L1.155| "
- - "L1.156[155,156] |L1.156| "
- - "L1.157[156,157] |L1.157| "
- - "L1.158[157,158] |L1.158| "
- - "L1.159[158,159] |L1.159| "
- - "L1.160[159,160] |L1.160| "
- - "L1.161[160,161] |L1.161| "
- - "L1.162[161,162] |L1.162| "
- - "L1.163[162,163] |L1.163| "
- - "L1.164[163,164] |L1.164| "
- - "L1.165[164,165] |L1.165| "
- - "L1.166[165,166] |L1.166| "
- - "L1.167[166,167] |L1.167| "
- - "L1.168[167,168] |L1.168| "
- - "L1.169[168,169] |L1.169| "
- - "L1.170[169,170] |L1.170| "
- - "L1.171[170,171] |L1.171| "
- - "L1.172[171,172] |L1.172| "
- - "L1.173[172,173] |L1.173| "
- - "L1.174[173,174] |L1.174| "
- - "L1.175[174,175] |L1.175| "
- - "L1.176[175,176] |L1.176| "
- - "L1.177[176,177] |L1.177| "
- - "L1.178[177,178] |L1.178| "
- - "L1.179[178,179] |L1.179| "
- - "L1.180[179,180] |L1.180| "
- - "L1.181[180,181] |L1.181|"
- - "L1.182[181,182] |L1.182|"
- - "L1.183[182,183] |L1.183|"
- - "L1.184[183,184] |L1.184|"
- - "L1.185[184,185] |L1.185|"
- - "L1.186[185,186] |L1.186|"
- - "L1.187[186,187] |L1.187|"
- - "L1.188[187,188] |L1.188|"
- - "L1.189[188,189] |L1.189|"
- - "L1.190[189,190] |L1.190|"
- - "L1.191[190,191] |L1.191|"
- - "L1.192[191,192] |L1.192|"
- - "L1.193[192,193] |L1.193|"
- - "L1.194[193,194] |L1.194|"
- - "L1.195[194,195] |L1.195|"
- - "L1.196[195,196] |L1.196|"
- - "L1.197[196,197] |L1.197|"
- - "L1.198[197,198] |L1.198|"
- - "L1.199[198,199] |L1.199|"
- - "L1.200[199,200] |L1.200|"
+ - "L1.2[2,3] |L1.2| "
+ - "L1.3[4,5] |L1.3| "
+ - "L1.4[6,7] |L1.4| "
+ - "L1.5[8,9] |L1.5| "
+ - "L1.6[10,11] |L1.6| "
+ - "L1.7[12,13] |L1.7| "
+ - "L1.8[14,15] |L1.8| "
+ - "L1.9[16,17] |L1.9| "
+ - "L1.10[18,19] |L1.10| "
+ - "L1.11[20,21] |L1.11| "
+ - "L1.12[22,23] |L1.12| "
+ - "L1.13[24,25] |L1.13| "
+ - "L1.14[26,27] |L1.14| "
+ - "L1.15[28,29] |L1.15| "
+ - "L1.16[30,31] |L1.16| "
+ - "L1.17[32,33] |L1.17| "
+ - "L1.18[34,35] |L1.18| "
+ - "L1.19[36,37] |L1.19| "
+ - "L1.20[38,39] |L1.20| "
+ - "L1.21[40,41] |L1.21| "
+ - "L1.22[42,43] |L1.22| "
+ - "L1.23[44,45] |L1.23| "
+ - "L1.24[46,47] |L1.24| "
+ - "L1.25[48,49] |L1.25| "
+ - "L1.26[50,51] |L1.26| "
+ - "L1.27[52,53] |L1.27| "
+ - "L1.28[54,55] |L1.28| "
+ - "L1.29[56,57] |L1.29| "
+ - "L1.30[58,59] |L1.30| "
+ - "L1.31[60,61] |L1.31| "
+ - "L1.32[62,63] |L1.32| "
+ - "L1.33[64,65] |L1.33| "
+ - "L1.34[66,67] |L1.34| "
+ - "L1.35[68,69] |L1.35| "
+ - "L1.36[70,71] |L1.36| "
+ - "L1.37[72,73] |L1.37| "
+ - "L1.38[74,75] |L1.38| "
+ - "L1.39[76,77] |L1.39| "
+ - "L1.40[78,79] |L1.40| "
+ - "L1.41[80,81] |L1.41| "
+ - "L1.42[82,83] |L1.42| "
+ - "L1.43[84,85] |L1.43| "
+ - "L1.44[86,87] |L1.44| "
+ - "L1.45[88,89] |L1.45| "
+ - "L1.46[90,91] |L1.46| "
+ - "L1.47[92,93] |L1.47| "
+ - "L1.48[94,95] |L1.48| "
+ - "L1.49[96,97] |L1.49| "
+ - "L1.50[98,99] |L1.50| "
+ - "L1.51[100,101] |L1.51| "
+ - "L1.52[102,103] |L1.52| "
+ - "L1.53[104,105] |L1.53| "
+ - "L1.54[106,107] |L1.54| "
+ - "L1.55[108,109] |L1.55| "
+ - "L1.56[110,111] |L1.56| "
+ - "L1.57[112,113] |L1.57| "
+ - "L1.58[114,115] |L1.58| "
+ - "L1.59[116,117] |L1.59| "
+ - "L1.60[118,119] |L1.60| "
+ - "L1.61[120,121] |L1.61| "
+ - "L1.62[122,123] |L1.62| "
+ - "L1.63[124,125] |L1.63| "
+ - "L1.64[126,127] |L1.64| "
+ - "L1.65[128,129] |L1.65| "
+ - "L1.66[130,131] |L1.66| "
+ - "L1.67[132,133] |L1.67| "
+ - "L1.68[134,135] |L1.68| "
+ - "L1.69[136,137] |L1.69| "
+ - "L1.70[138,139] |L1.70| "
+ - "L1.71[140,141] |L1.71| "
+ - "L1.72[142,143] |L1.72| "
+ - "L1.73[144,145] |L1.73| "
+ - "L1.74[146,147] |L1.74| "
+ - "L1.75[148,149] |L1.75| "
+ - "L1.76[150,151] |L1.76| "
+ - "L1.77[152,153] |L1.77| "
+ - "L1.78[154,155] |L1.78| "
+ - "L1.79[156,157] |L1.79| "
+ - "L1.80[158,159] |L1.80| "
+ - "L1.81[160,161] |L1.81| "
+ - "L1.82[162,163] |L1.82| "
+ - "L1.83[164,165] |L1.83| "
+ - "L1.84[166,167] |L1.84| "
+ - "L1.85[168,169] |L1.85| "
+ - "L1.86[170,171] |L1.86| "
+ - "L1.87[172,173] |L1.87| "
+ - "L1.88[174,175] |L1.88| "
+ - "L1.89[176,177] |L1.89| "
+ - "L1.90[178,179] |L1.90| "
+ - "L1.91[180,181] |L1.91| "
+ - "L1.92[182,183] |L1.92| "
+ - "L1.93[184,185] |L1.93| "
+ - "L1.94[186,187] |L1.94| "
+ - "L1.95[188,189] |L1.95| "
+ - "L1.96[190,191] |L1.96| "
+ - "L1.97[192,193] |L1.97| "
+ - "L1.98[194,195] |L1.98| "
+ - "L1.99[196,197] |L1.99| "
+ - "L1.100[198,199] |L1.100| "
+ - "L1.101[200,201] |L1.101| "
+ - "L1.102[202,203] |L1.102| "
+ - "L1.103[204,205] |L1.103| "
+ - "L1.104[206,207] |L1.104| "
+ - "L1.105[208,209] |L1.105| "
+ - "L1.106[210,211] |L1.106| "
+ - "L1.107[212,213] |L1.107| "
+ - "L1.108[214,215] |L1.108| "
+ - "L1.109[216,217] |L1.109| "
+ - "L1.110[218,219] |L1.110| "
+ - "L1.111[220,221] |L1.111| "
+ - "L1.112[222,223] |L1.112| "
+ - "L1.113[224,225] |L1.113| "
+ - "L1.114[226,227] |L1.114| "
+ - "L1.115[228,229] |L1.115| "
+ - "L1.116[230,231] |L1.116| "
+ - "L1.117[232,233] |L1.117| "
+ - "L1.118[234,235] |L1.118| "
+ - "L1.119[236,237] |L1.119| "
+ - "L1.120[238,239] |L1.120| "
+ - "L1.121[240,241] |L1.121| "
+ - "L1.122[242,243] |L1.122| "
+ - "L1.123[244,245] |L1.123| "
+ - "L1.124[246,247] |L1.124| "
+ - "L1.125[248,249] |L1.125| "
+ - "L1.126[250,251] |L1.126| "
+ - "L1.127[252,253] |L1.127| "
+ - "L1.128[254,255] |L1.128| "
+ - "L1.129[256,257] |L1.129| "
+ - "L1.130[258,259] |L1.130| "
+ - "L1.131[260,261] |L1.131| "
+ - "L1.132[262,263] |L1.132| "
+ - "L1.133[264,265] |L1.133| "
+ - "L1.134[266,267] |L1.134| "
+ - "L1.135[268,269] |L1.135| "
+ - "L1.136[270,271] |L1.136| "
+ - "L1.137[272,273] |L1.137| "
+ - "L1.138[274,275] |L1.138| "
+ - "L1.139[276,277] |L1.139| "
+ - "L1.140[278,279] |L1.140| "
+ - "L1.141[280,281] |L1.141| "
+ - "L1.142[282,283] |L1.142| "
+ - "L1.143[284,285] |L1.143| "
+ - "L1.144[286,287] |L1.144| "
+ - "L1.145[288,289] |L1.145| "
+ - "L1.146[290,291] |L1.146| "
+ - "L1.147[292,293] |L1.147| "
+ - "L1.148[294,295] |L1.148| "
+ - "L1.149[296,297] |L1.149| "
+ - "L1.150[298,299] |L1.150| "
+ - "L1.151[300,301] |L1.151| "
+ - "L1.152[302,303] |L1.152| "
+ - "L1.153[304,305] |L1.153| "
+ - "L1.154[306,307] |L1.154| "
+ - "L1.155[308,309] |L1.155| "
+ - "L1.156[310,311] |L1.156| "
+ - "L1.157[312,313] |L1.157| "
+ - "L1.158[314,315] |L1.158| "
+ - "L1.159[316,317] |L1.159| "
+ - "L1.160[318,319] |L1.160| "
+ - "L1.161[320,321] |L1.161| "
+ - "L1.162[322,323] |L1.162| "
+ - "L1.163[324,325] |L1.163| "
+ - "L1.164[326,327] |L1.164| "
+ - "L1.165[328,329] |L1.165| "
+ - "L1.166[330,331] |L1.166| "
+ - "L1.167[332,333] |L1.167| "
+ - "L1.168[334,335] |L1.168| "
+ - "L1.169[336,337] |L1.169| "
+ - "L1.170[338,339] |L1.170| "
+ - "L1.171[340,341] |L1.171| "
+ - "L1.172[342,343] |L1.172| "
+ - "L1.173[344,345] |L1.173| "
+ - "L1.174[346,347] |L1.174| "
+ - "L1.175[348,349] |L1.175| "
+ - "L1.176[350,351] |L1.176| "
+ - "L1.177[352,353] |L1.177| "
+ - "L1.178[354,355] |L1.178| "
+ - "L1.179[356,357] |L1.179| "
+ - "L1.180[358,359] |L1.180| "
+ - "L1.181[360,361] |L1.181|"
+ - "L1.182[362,363] |L1.182|"
+ - "L1.183[364,365] |L1.183|"
+ - "L1.184[366,367] |L1.184|"
+ - "L1.185[368,369] |L1.185|"
+ - "L1.186[370,371] |L1.186|"
+ - "L1.187[372,373] |L1.187|"
+ - "L1.188[374,375] |L1.188|"
+ - "L1.189[376,377] |L1.189|"
+ - "L1.190[378,379] |L1.190|"
+ - "L1.191[380,381] |L1.191|"
+ - "L1.192[382,383] |L1.192|"
+ - "L1.193[384,385] |L1.193|"
+ - "L1.194[386,387] |L1.194|"
+ - "L1.195[388,389] |L1.195|"
+ - "L1.196[390,391] |L1.196|"
+ - "L1.197[392,393] |L1.197|"
+ - "L1.198[394,395] |L1.198|"
+ - "L1.199[396,397] |L1.199|"
+ - "L1.200[398,399] |L1.200|"
- "**** 1 Output Files (parquet_file_id not yet assigned), 1.37mb total:"
- "L1, all files 1.37mb "
- - "L1.?[0,200] |-------------------------------------L1.?-------------------------------------|"
+ - "L1.?[0,399] |-------------------------------------L1.?-------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 200 files: L1.1, L1.2, L1.3, L1.4, L1.5, L1.6, L1.7, L1.8, L1.9, L1.10, L1.11, L1.12, L1.13, L1.14, L1.15, L1.16, L1.17, L1.18, L1.19, L1.20, L1.21, L1.22, L1.23, L1.24, L1.25, L1.26, L1.27, L1.28, L1.29, L1.30, L1.31, L1.32, L1.33, L1.34, L1.35, L1.36, L1.37, L1.38, L1.39, L1.40, L1.41, L1.42, L1.43, L1.44, L1.45, L1.46, L1.47, L1.48, L1.49, L1.50, L1.51, L1.52, L1.53, L1.54, L1.55, L1.56, L1.57, L1.58, L1.59, L1.60, L1.61, L1.62, L1.63, L1.64, L1.65, L1.66, L1.67, L1.68, L1.69, L1.70, L1.71, L1.72, L1.73, L1.74, L1.75, L1.76, L1.77, L1.78, L1.79, L1.80, L1.81, L1.82, L1.83, L1.84, L1.85, L1.86, L1.87, L1.88, L1.89, L1.90, L1.91, L1.92, L1.93, L1.94, L1.95, L1.96, L1.97, L1.98, L1.99, L1.100, L1.101, L1.102, L1.103, L1.104, L1.105, L1.106, L1.107, L1.108, L1.109, L1.110, L1.111, L1.112, L1.113, L1.114, L1.115, L1.116, L1.117, L1.118, L1.119, L1.120, L1.121, L1.122, L1.123, L1.124, L1.125, L1.126, L1.127, L1.128, L1.129, L1.130, L1.131, L1.132, L1.133, L1.134, L1.135, L1.136, L1.137, L1.138, L1.139, L1.140, L1.141, L1.142, L1.143, L1.144, L1.145, L1.146, L1.147, L1.148, L1.149, L1.150, L1.151, L1.152, L1.153, L1.154, L1.155, L1.156, L1.157, L1.158, L1.159, L1.160, L1.161, L1.162, L1.163, L1.164, L1.165, L1.166, L1.167, L1.168, L1.169, L1.170, L1.171, L1.172, L1.173, L1.174, L1.175, L1.176, L1.177, L1.178, L1.179, L1.180, L1.181, L1.182, L1.183, L1.184, L1.185, L1.186, L1.187, L1.188, L1.189, L1.190, L1.191, L1.192, L1.193, L1.194, L1.195, L1.196, L1.197, L1.198, L1.199, L1.200"
- " Creating 1 files at level CompactionLevel::L1"
- "**** Simulation run 1, type=compact. 88 Input Files, 616kb total:"
- "L1, all files 7kb "
- - "L1.201[200,201] |L1.201| "
- - "L1.202[201,202] |L1.202| "
- - "L1.203[202,203] |L1.203| "
- - "L1.204[203,204] |L1.204| "
- - "L1.205[204,205] |L1.205| "
- - "L1.206[205,206] |L1.206| "
- - "L1.207[206,207] |L1.207| "
- - "L1.208[207,208] |L1.208| "
- - "L1.209[208,209] |L1.209| "
- - "L1.210[209,210] |L1.210| "
- - "L1.211[210,211] |L1.211| "
- - "L1.212[211,212] |L1.212| "
- - "L1.213[212,213] |L1.213| "
- - "L1.214[213,214] |L1.214| "
- - "L1.215[214,215] |L1.215| "
- - "L1.216[215,216] |L1.216| "
- - "L1.217[216,217] |L1.217| "
- - "L1.218[217,218] |L1.218| "
- - "L1.219[218,219] |L1.219| "
- - "L1.220[219,220] |L1.220| "
- - "L1.221[220,221] |L1.221| "
- - "L1.222[221,222] |L1.222| "
- - "L1.223[222,223] |L1.223| "
- - "L1.224[223,224] |L1.224| "
- - "L1.225[224,225] |L1.225| "
- - "L1.226[225,226] |L1.226| "
- - "L1.227[226,227] |L1.227| "
- - "L1.228[227,228] |L1.228| "
- - "L1.229[228,229] |L1.229| "
- - "L1.230[229,230] |L1.230| "
- - "L1.231[230,231] |L1.231| "
- - "L1.232[231,232] |L1.232| "
- - "L1.233[232,233] |L1.233| "
- - "L1.234[233,234] |L1.234| "
- - "L1.235[234,235] |L1.235| "
- - "L1.236[235,236] |L1.236| "
- - "L1.237[236,237] |L1.237| "
- - "L1.238[237,238] |L1.238| "
- - "L1.239[238,239] |L1.239| "
- - "L1.240[239,240] |L1.240| "
- - "L1.241[240,241] |L1.241| "
- - "L1.242[241,242] |L1.242| "
- - "L1.243[242,243] |L1.243| "
- - "L1.244[243,244] |L1.244| "
- - "L1.245[244,245] |L1.245| "
- - "L1.246[245,246] |L1.246| "
- - "L1.247[246,247] |L1.247| "
- - "L1.248[247,248] |L1.248| "
- - "L1.249[248,249] |L1.249| "
- - "L1.250[249,250] |L1.250| "
- - "L1.251[250,251] |L1.251| "
- - "L1.252[251,252] |L1.252| "
- - "L1.253[252,253] |L1.253| "
- - "L1.254[253,254] |L1.254| "
- - "L1.255[254,255] |L1.255| "
- - "L1.256[255,256] |L1.256| "
- - "L1.257[256,257] |L1.257| "
- - "L1.258[257,258] |L1.258| "
- - "L1.259[258,259] |L1.259| "
- - "L1.260[259,260] |L1.260| "
- - "L1.261[260,261] |L1.261| "
- - "L1.262[261,262] |L1.262| "
- - "L1.263[262,263] |L1.263| "
- - "L1.264[263,264] |L1.264| "
- - "L1.265[264,265] |L1.265| "
- - "L1.266[265,266] |L1.266| "
- - "L1.267[266,267] |L1.267| "
- - "L1.268[267,268] |L1.268| "
- - "L1.269[268,269] |L1.269| "
- - "L1.270[269,270] |L1.270| "
- - "L1.271[270,271] |L1.271| "
- - "L1.272[271,272] |L1.272| "
- - "L1.273[272,273] |L1.273| "
- - "L1.274[273,274] |L1.274| "
- - "L1.275[274,275] |L1.275| "
- - "L1.276[275,276] |L1.276| "
- - "L1.277[276,277] |L1.277| "
- - "L1.278[277,278] |L1.278| "
- - "L1.279[278,279] |L1.279| "
- - "L1.280[279,280] |L1.280| "
- - "L1.281[280,281] |L1.281|"
- - "L1.282[281,282] |L1.282|"
- - "L1.283[282,283] |L1.283|"
- - "L1.284[283,284] |L1.284|"
- - "L1.285[284,285] |L1.285|"
- - "L1.286[285,286] |L1.286|"
- - "L1.287[286,287] |L1.287|"
- - "L1.288[287,288] |L1.288|"
+ - "L1.201[400,401] |L1.201| "
+ - "L1.202[402,403] |L1.202| "
+ - "L1.203[404,405] |L1.203| "
+ - "L1.204[406,407] |L1.204| "
+ - "L1.205[408,409] |L1.205| "
+ - "L1.206[410,411] |L1.206| "
+ - "L1.207[412,413] |L1.207| "
+ - "L1.208[414,415] |L1.208| "
+ - "L1.209[416,417] |L1.209| "
+ - "L1.210[418,419] |L1.210| "
+ - "L1.211[420,421] |L1.211| "
+ - "L1.212[422,423] |L1.212| "
+ - "L1.213[424,425] |L1.213| "
+ - "L1.214[426,427] |L1.214| "
+ - "L1.215[428,429] |L1.215| "
+ - "L1.216[430,431] |L1.216| "
+ - "L1.217[432,433] |L1.217| "
+ - "L1.218[434,435] |L1.218| "
+ - "L1.219[436,437] |L1.219| "
+ - "L1.220[438,439] |L1.220| "
+ - "L1.221[440,441] |L1.221| "
+ - "L1.222[442,443] |L1.222| "
+ - "L1.223[444,445] |L1.223| "
+ - "L1.224[446,447] |L1.224| "
+ - "L1.225[448,449] |L1.225| "
+ - "L1.226[450,451] |L1.226| "
+ - "L1.227[452,453] |L1.227| "
+ - "L1.228[454,455] |L1.228| "
+ - "L1.229[456,457] |L1.229| "
+ - "L1.230[458,459] |L1.230| "
+ - "L1.231[460,461] |L1.231| "
+ - "L1.232[462,463] |L1.232| "
+ - "L1.233[464,465] |L1.233| "
+ - "L1.234[466,467] |L1.234| "
+ - "L1.235[468,469] |L1.235| "
+ - "L1.236[470,471] |L1.236| "
+ - "L1.237[472,473] |L1.237| "
+ - "L1.238[474,475] |L1.238| "
+ - "L1.239[476,477] |L1.239| "
+ - "L1.240[478,479] |L1.240| "
+ - "L1.241[480,481] |L1.241| "
+ - "L1.242[482,483] |L1.242| "
+ - "L1.243[484,485] |L1.243| "
+ - "L1.244[486,487] |L1.244| "
+ - "L1.245[488,489] |L1.245| "
+ - "L1.246[490,491] |L1.246| "
+ - "L1.247[492,493] |L1.247| "
+ - "L1.248[494,495] |L1.248| "
+ - "L1.249[496,497] |L1.249| "
+ - "L1.250[498,499] |L1.250| "
+ - "L1.251[500,501] |L1.251| "
+ - "L1.252[502,503] |L1.252| "
+ - "L1.253[504,505] |L1.253| "
+ - "L1.254[506,507] |L1.254| "
+ - "L1.255[508,509] |L1.255| "
+ - "L1.256[510,511] |L1.256| "
+ - "L1.257[512,513] |L1.257| "
+ - "L1.258[514,515] |L1.258| "
+ - "L1.259[516,517] |L1.259| "
+ - "L1.260[518,519] |L1.260| "
+ - "L1.261[520,521] |L1.261| "
+ - "L1.262[522,523] |L1.262| "
+ - "L1.263[524,525] |L1.263| "
+ - "L1.264[526,527] |L1.264| "
+ - "L1.265[528,529] |L1.265| "
+ - "L1.266[530,531] |L1.266| "
+ - "L1.267[532,533] |L1.267| "
+ - "L1.268[534,535] |L1.268| "
+ - "L1.269[536,537] |L1.269| "
+ - "L1.270[538,539] |L1.270| "
+ - "L1.271[540,541] |L1.271| "
+ - "L1.272[542,543] |L1.272| "
+ - "L1.273[544,545] |L1.273| "
+ - "L1.274[546,547] |L1.274| "
+ - "L1.275[548,549] |L1.275| "
+ - "L1.276[550,551] |L1.276| "
+ - "L1.277[552,553] |L1.277| "
+ - "L1.278[554,555] |L1.278| "
+ - "L1.279[556,557] |L1.279| "
+ - "L1.280[558,559] |L1.280|"
+ - "L1.281[560,561] |L1.281|"
+ - "L1.282[562,563] |L1.282|"
+ - "L1.283[564,565] |L1.283|"
+ - "L1.284[566,567] |L1.284|"
+ - "L1.285[568,569] |L1.285|"
+ - "L1.286[570,571] |L1.286|"
+ - "L1.287[572,573] |L1.287|"
+ - "L1.288[574,575] |L1.288|"
- "**** 1 Output Files (parquet_file_id not yet assigned), 616kb total:"
- "L1, all files 616kb "
- - "L1.?[200,288] |-------------------------------------L1.?-------------------------------------|"
+ - "L1.?[400,575] |-------------------------------------L1.?-------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 88 files: L1.201, L1.202, L1.203, L1.204, L1.205, L1.206, L1.207, L1.208, L1.209, L1.210, L1.211, L1.212, L1.213, L1.214, L1.215, L1.216, L1.217, L1.218, L1.219, L1.220, L1.221, L1.222, L1.223, L1.224, L1.225, L1.226, L1.227, L1.228, L1.229, L1.230, L1.231, L1.232, L1.233, L1.234, L1.235, L1.236, L1.237, L1.238, L1.239, L1.240, L1.241, L1.242, L1.243, L1.244, L1.245, L1.246, L1.247, L1.248, L1.249, L1.250, L1.251, L1.252, L1.253, L1.254, L1.255, L1.256, L1.257, L1.258, L1.259, L1.260, L1.261, L1.262, L1.263, L1.264, L1.265, L1.266, L1.267, L1.268, L1.269, L1.270, L1.271, L1.272, L1.273, L1.274, L1.275, L1.276, L1.277, L1.278, L1.279, L1.280, L1.281, L1.282, L1.283, L1.284, L1.285, L1.286, L1.287, L1.288"
- " Creating 1 files at level CompactionLevel::L1"
- "**** Final Output Files "
- "L1 "
- - "L1.289[0,200] 1.37mb|-----------------------L1.289------------------------| "
- - "L1.290[200,288] 616kb |--------L1.290--------| "
+ - "L1.289[0,399] 1.37mb|-----------------------L1.289------------------------| "
+ - "L1.290[400,575] 616kb |--------L1.290--------| "
"###
);
}
diff --git a/compactor2/tests/layouts/mod.rs b/compactor2/tests/layouts/mod.rs
index 7b1d3c73e9..b3b58a2379 100644
--- a/compactor2/tests/layouts/mod.rs
+++ b/compactor2/tests/layouts/mod.rs
@@ -105,6 +105,9 @@ pub(crate) async fn all_overlapping_l0_files(setup: TestSetup) -> TestSetup {
/// runs the scenario and returns a string based output for comparison
pub(crate) async fn run_layout_scenario(setup: &TestSetup) -> Vec<String> {
+ // verify the files are ok to begin with
+ setup.verify_invariants().await;
+
setup.catalog.time_provider.inc(Duration::from_nanos(200));
let input_files = setup.list_by_table_not_to_delete().await;
@@ -126,6 +129,10 @@ pub(crate) async fn run_layout_scenario(setup: &TestSetup) -> Vec<String> {
&sort_files(output_files),
));
+ // TODO this fails (as the compactor produces output files with overlapping ranges)
+ // verify that the output of the compactor was valid
+ //setup.verify_invariants().await;
+
output
}
diff --git a/compactor2_test_utils/src/lib.rs b/compactor2_test_utils/src/lib.rs
index 27ae510e14..0c7b956bdc 100644
--- a/compactor2_test_utils/src/lib.rs
+++ b/compactor2_test_utils/src/lib.rs
@@ -387,7 +387,7 @@ impl TestSetup {
TestSetupBuilder::new().await
}
- /// Get the catalog files stored in the catalog
+ /// Get the parquet files stored in the catalog
pub async fn list_by_table_not_to_delete(&self) -> Vec<ParquetFile> {
self.catalog
.list_by_table_not_to_delete(self.table.table.id)
@@ -455,6 +455,39 @@ impl TestSetup {
run_log: self.run_log.lock().unwrap().clone(),
}
}
+
+ /// Checks the catalog contents of this test setup.
+ ///
+ /// Currently checks:
+ /// 1. There are no overlapping files (the compactor should never create overlapping L1 or L2 files)
+ pub async fn verify_invariants(&self) {
+ let files: Vec<_> = self
+ .list_by_table_not_to_delete()
+ .await
+ .into_iter()
+ // ignore files that are deleted
+ .filter(|f| f.to_delete.is_none())
+ .collect();
+
+ for f1 in &files {
+ for f2 in &files {
+ assert_no_overlap(f1, f2);
+ }
+ }
+ }
+}
+
+/// Panics if f1 and f2 are different, overlapping files in the
+/// L1 or L2 levels (the compactor should never create such files).
+fn assert_no_overlap(f1: &ParquetFile, f2: &ParquetFile) {
+ if f1.id != f2.id
+ && (f1.compaction_level == CompactionLevel::FileNonOverlapped
+ || f1.compaction_level == CompactionLevel::Final)
+ && f1.compaction_level == f2.compaction_level
+ && f1.overlaps(f2)
+ {
+ panic!("Found overlapping files at L1/L2 target level!\n{f1:#?}\n{f2:#?}");
+ }
}
/// Information about the compaction that was run
@@ -1012,3 +1045,145 @@ pub fn create_overlapped_files_3_mix_size(size: i64) -> Vec<ParquetFile> {
// Put the files in random order
vec![l0_3, l0_2, l0_1, l0_4, l0_5, l0_6, l1_1, l1_2]
}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[tokio::test]
+ async fn good_setup_overlapping_l0() {
+ let builder = TestSetup::builder().await;
+
+ // two overlapping L0 Files
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_min_time(100)
+ .with_max_time(200),
+ )
+ .await;
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_min_time(50)
+ .with_max_time(200),
+ )
+ .await;
+
+ // expect no panic
+ builder.build().await.verify_invariants().await;
+ }
+
+ #[tokio::test]
+ #[should_panic(expected = "Found overlapping files at L1/L2 target level")]
+ async fn bad_setup_overlapping_l1() {
+ let builder = TestSetup::builder().await;
+
+ // two overlapping L1 Files
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_min_time(50)
+ .with_max_time(200),
+ )
+ .await;
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_min_time(100)
+ .with_max_time(200),
+ )
+ .await;
+
+ builder.build().await.verify_invariants().await;
+ }
+
+ #[tokio::test]
+ #[should_panic(expected = "Found overlapping files at L1/L2 target level")]
+ async fn bad_setup_overlapping_l1_leading_edge() {
+ let builder = TestSetup::builder().await;
+
+ // a non-overlapping L1 file plus
+ // two overlapping L1 files that meet right on the edge (max time == min time)
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_min_time(100)
+ .with_max_time(200),
+ )
+ .await;
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_min_time(50)
+ .with_max_time(75),
+ )
+ .await;
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_min_time(200)
+ .with_max_time(300),
+ )
+ .await;
+
+ builder.build().await.verify_invariants().await;
+ }
+
+ #[tokio::test]
+ #[should_panic(expected = "Found overlapping files at L1/L2 target level")]
+ async fn bad_setup_overlapping_l2() {
+ let builder = TestSetup::builder().await;
+
+ // two overlapping L2 Files
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::Final)
+ .with_min_time(100)
+ .with_max_time(200),
+ )
+ .await;
+
+ builder
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_compaction_level(CompactionLevel::Final)
+ .with_min_time(50)
+ .with_max_time(200),
+ )
+ .await;
+
+ builder.build().await.verify_invariants().await;
+ }
+
+ /// creates a TestParquetFileBuilder setup for layout tests
+ pub fn parquet_builder() -> TestParquetFileBuilder {
+ TestParquetFileBuilder::default()
+ // need some LP to generate the schema
+ .with_line_protocol("table,tag1=A,tag2=B,tag3=C field_int=1i 100")
+ .with_file_size_bytes(300)
+ }
+}
|
50d9d4032206c374064747791fa64e1c17409e83
|
Carol (Nichols || Goulding)
|
2023-03-16 15:01:53
|
Rename a filter slot based on when it's applied
|
The filter in this spot could potentially do whatever, but the important
part of what should go in this field is that it will be called on the
files after they're classified by the file classifier.
| null |
refactor: Rename a filter slot based on when it's applied
The filter in this spot could potentially do whatever, but the important
part of what should go in this field is that it will be called on the
files after they're classified by the file classifier.
|
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index 0a4c8af0da..d9463e0a9a 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -329,7 +329,7 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
)),
),
))),
- partition_too_large_to_compact_filter: Arc::new(LoggingPartitionFilterWrapper::new(
+ post_classification_partition_filter: Arc::new(LoggingPartitionFilterWrapper::new(
MetricsPartitionFilterWrapper::new(
AndPartitionFilter::new(partition_large_size_tiny_time_range_filter),
&config.metric_registry,
diff --git a/compactor2/src/components/mod.rs b/compactor2/src/components/mod.rs
index 675193bb0d..3a892be67f 100644
--- a/compactor2/src/components/mod.rs
+++ b/compactor2/src/components/mod.rs
@@ -55,7 +55,7 @@ pub struct Components {
/// stop condition for completing a partition compaction
pub partition_filter: Arc<dyn PartitionFilter>,
/// condition to avoid running out of resources during compaction
- pub partition_too_large_to_compact_filter: Arc<dyn PartitionFilter>,
+ pub post_classification_partition_filter: Arc<dyn PartitionFilter>,
/// Records "partition is done" status for given partition.
pub partition_done_sink: Arc<dyn PartitionDoneSink>,
/// Commits changes (i.e. deletion and creation) to the catalog.
diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs
index 2eed43fd1b..5e9f44c690 100644
--- a/compactor2/src/components/report.rs
+++ b/compactor2/src/components/report.rs
@@ -99,7 +99,7 @@ pub fn log_components(components: &Components) {
partition_files_source,
round_info_source,
partition_filter,
- partition_too_large_to_compact_filter,
+ post_classification_partition_filter: partition_too_large_to_compact_filter,
partition_done_sink,
commit,
ir_planner,
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index 32918f0d1b..92082ec4a1 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -237,7 +237,7 @@ async fn try_compact_partition(
// Skip partition if it has neither files to upgrade nor files to compact or split
if files_to_upgrade.is_empty()
&& !components
- .partition_too_large_to_compact_filter
+ .post_classification_partition_filter
.apply(&partition_info, &files_to_compact_or_split.files())
.await?
{
|
458b1bf1a6ae80786a156fd1c56c2ebff27d0947
|
Fraser Savage
|
2023-07-12 14:22:58
|
Extract SpanContext from RPC write request
|
Ensure that if a `SpanContext` type is present in the request that the
trace ID is used for spans in the RPC write path.
| null |
feat(ingester): Extract SpanContext from RPC write request
Ensure that if a `SpanContext` type is present in the request that the
trace ID is used for spans in the RPC write path.
|
diff --git a/ingester/src/server/grpc/rpc_write.rs b/ingester/src/server/grpc/rpc_write.rs
index 3a6ac27c95..e5b4a182c2 100644
--- a/ingester/src/server/grpc/rpc_write.rs
+++ b/ingester/src/server/grpc/rpc_write.rs
@@ -9,6 +9,10 @@ use mutable_batch_pb::decode::decode_database_batch;
use observability_deps::tracing::*;
use thiserror::Error;
use tonic::{Code, Request, Response};
+use trace::{
+ ctx::SpanContext,
+ span::{SpanExt, SpanRecorder},
+};
use crate::{
dml_payload::write::{PartitionedData, TableData, WriteOperation},
@@ -132,6 +136,11 @@ where
&self,
request: Request<proto::WriteRequest>,
) -> Result<Response<proto::WriteResponse>, tonic::Status> {
+ // Extract the span context
+ let span_ctx: Option<SpanContext> = request.extensions().get().cloned();
+ let span = span_ctx.child_span("ingester write");
+ let mut span_recorder = SpanRecorder::new(span);
+
// Drop writes if the persistence is saturated or the ingester is
// shutting down.
//
@@ -157,8 +166,6 @@ where
.remote_addr()
.map(|v| v.to_string())
.unwrap_or_else(|| "<unknown>".to_string());
-
- // Extract the write payload
let payload = request.into_inner().payload.ok_or(RpcError::NoPayload)?;
let batches = decode_database_batch(&payload).map_err(RpcError::Decode)?;
@@ -199,23 +206,21 @@ where
})
.collect(),
partition_key,
- // TODO:
- // The tracing context should be propagated over the RPC boundary.
- //
- // See https://github.com/influxdata/influxdb_iox/issues/6177
- None,
+ span_recorder.span().map(|span| span.ctx.clone()),
);
- // Apply the IngestOp to the in-memory buffer.
+ // Apply the IngestOp to the DML sink.
match self.sink.apply(IngestOp::Write(op)).await {
- Ok(()) => {}
+ Ok(()) => {
+ span_recorder.ok("applied write");
+ Ok(Response::new(proto::WriteResponse {}))
+ }
Err(e) => {
error!(error=%e, "failed to apply ingest operation");
- return Err(e.into())?;
+ span_recorder.error(e.to_string());
+ Err(e.into())?
}
}
-
- Ok(Response::new(proto::WriteResponse {}))
}
}
@@ -228,6 +233,7 @@ mod tests {
Column, DatabaseBatch, TableBatch,
};
use std::{collections::HashSet, sync::Arc};
+ use trace::RingBufferTraceCollector;
use super::*;
use crate::{
@@ -609,4 +615,66 @@ mod tests {
// One write should have been passed through to the DML sinks.
assert_matches!(*mock.get_calls(), [IngestOp::Write(_)]);
}
+
+ /// Assert that the ingester propagates the SpanContext from the client
+ /// request.
+ #[tokio::test]
+ async fn test_rpc_write_span_propagation() {
+ let mock = Arc::new(MockDmlSink::default().with_apply_return(vec![Ok(())]));
+ let timestamp = Arc::new(TimestampOracle::new(0));
+
+ let ingest_state = Arc::new(IngestState::default());
+
+ let handler = RpcWrite::new(Arc::clone(&mock), timestamp, Arc::clone(&ingest_state));
+
+ let mut req = Request::new(proto::WriteRequest {
+ payload: Some(DatabaseBatch {
+ database_id: ARBITRARY_NAMESPACE_ID.get(),
+ partition_key: ARBITRARY_PARTITION_KEY.to_string(),
+ table_batches: vec![TableBatch {
+ table_id: ARBITRARY_TABLE_ID.get(),
+ columns: vec![Column {
+ column_name: "time".to_string(),
+ semantic_type: SemanticType::Time.into(),
+ values: Some(Values {
+ i64_values: vec![4242],
+ f64_values: vec![],
+ u64_values: vec![],
+ string_values: vec![],
+ bool_values: vec![],
+ bytes_values: vec![],
+ packed_string_values: None,
+ interned_string_values: None,
+ }),
+ null_mask: vec![0],
+ }],
+ row_count: 1,
+ }],
+ }),
+ });
+
+ // Initialise a trace context to bundle into the request.
+ let trace_collector = Arc::new(RingBufferTraceCollector::new(5));
+ let span_ctx = SpanContext::new(Arc::new(Arc::clone(&trace_collector)));
+ let external_span = span_ctx.child("external span");
+
+ // Insert the span context into the request extensions
+ req.extensions_mut().insert(external_span.clone().ctx);
+
+ handler.write(req).await.expect("write should succeed");
+
+ // One write should have been passed through to the DML sinks,
+ // containing the same trace ID.
+ assert_matches!(&mock.get_calls()[..], [IngestOp::Write(w)] => {
+ let got_span_ctx = w.clone().span_context().expect("op should contain span context").to_owned();
+ assert_eq!(got_span_ctx.trace_id, external_span.ctx.trace_id);
+ });
+
+ // Check the span name and the parent span ID are as expected
+ let spans = trace_collector.spans();
+ assert_matches!(spans.as_slice(), [handler_span] => {
+ assert_eq!(handler_span.name, "ingester write");
+ assert_eq!(handler_span.ctx.parent_span_id, Some(external_span.ctx.span_id));
+ })
+ }
}
diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs
index cf45a30fa0..912ccea711 100644
--- a/querier/src/ingester/mod.rs
+++ b/querier/src/ingester/mod.rs
@@ -111,7 +111,7 @@ pub enum Error {
ChunkWithoutPartition { ingester_address: String },
#[snafu(display(
- "Duplicate partition info for partition {partition_id}, ingestger: {ingester_address}"
+ "Duplicate partition info for partition {partition_id}, ingester: {ingester_address}"
))]
DuplicatePartitionInfo {
partition_id: PartitionId,
|
9e3f7611bb2e434718a259971f0d5555ab8a84a6
|
Dom Dwyer
|
2023-09-12 17:03:12
|
buffered partition limit
|
This commit adds an optional (disabled by default) limit on the number
of partitions that may be buffered for a namespace at any one time.
The exact value is configurable by setting
INFLUXDB_IOX_MAX_PARTITIONS_PER_NAMESPACE to a non-zero value, and is
disabled unless specified.
| null |
feat(ingester): buffered partition limit
This commit adds an optional (disabled by default) limit on the number
of partitions that may be buffered for a namespace at any one time.
The exact value is configurable by setting
INFLUXDB_IOX_MAX_PARTITIONS_PER_NAMESPACE to a non-zero value, and is
disabled unless specified.
|
diff --git a/clap_blocks/src/ingester.rs b/clap_blocks/src/ingester.rs
index f2d87ae17d..d736b16d92 100644
--- a/clap_blocks/src/ingester.rs
+++ b/clap_blocks/src/ingester.rs
@@ -1,6 +1,6 @@
//! CLI config for the ingester using the RPC write path
-use std::path::PathBuf;
+use std::{num::NonZeroUsize, path::PathBuf};
use crate::gossip::GossipConfig;
@@ -75,4 +75,14 @@ pub struct IngesterConfig {
action
)]
pub persist_hot_partition_cost: usize,
+
+ /// Limit the number of partitions that may be buffered in a single
+ /// namespace (across all tables) at any one time.
+ ///
+ /// This limit is disabled by default.
+ #[clap(
+ long = "max-partitions-per-namespace",
+ env = "INFLUXDB_IOX_MAX_PARTITIONS_PER_NAMESPACE"
+ )]
+ pub max_partitions_per_namespace: Option<NonZeroUsize>,
}
diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs
index 1f578afa67..788d2a960e 100644
--- a/influxdb_iox/src/commands/run/all_in_one.rs
+++ b/influxdb_iox/src/commands/run/all_in_one.rs
@@ -485,6 +485,7 @@ impl Config {
persist_hot_partition_cost,
rpc_write_max_incoming_bytes: 1024 * 1024 * 1024, // 1GiB
gossip_config: GossipConfig::disabled(),
+ max_partitions_per_namespace: None,
};
let router_config = RouterConfig {
diff --git a/ingester/src/buffer_tree/namespace.rs b/ingester/src/buffer_tree/namespace.rs
index 3649685b99..5fc37eedc3 100644
--- a/ingester/src/buffer_tree/namespace.rs
+++ b/ingester/src/buffer_tree/namespace.rs
@@ -11,9 +11,10 @@ use predicate::Predicate;
use trace::span::Span;
use super::{
- partition::resolver::PartitionProvider,
+ partition::{counter::PartitionCounter, resolver::PartitionProvider},
post_write::PostWriteObserver,
table::{metadata_resolver::TableProvider, TableData},
+ BufferWriteError,
};
use crate::{
arcmap::ArcMap,
@@ -80,6 +81,15 @@ pub(crate) struct NamespaceData<O> {
/// [`PartitionData`]: super::partition::PartitionData
partition_provider: Arc<dyn PartitionProvider>,
+ /// A counter tracking the approximate number of partitions currently
+ /// buffered.
+ ///
+ /// This counter is NOT atomically incremented w.r.t creation of the
+ /// partitions it tracks, and therefore is susceptible to "overrun",
+ /// breaching the configured partition count limit by a relatively small
+ /// degree.
+ partition_count: Arc<PartitionCounter>,
+
post_write_observer: Arc<O>,
}
@@ -90,6 +100,7 @@ impl<O> NamespaceData<O> {
namespace_name: Arc<DeferredLoad<NamespaceName>>,
catalog_table_resolver: Arc<dyn TableProvider>,
partition_provider: Arc<dyn PartitionProvider>,
+ partition_counter: PartitionCounter,
post_write_observer: Arc<O>,
metrics: &metric::Registry,
) -> Self {
@@ -108,6 +119,7 @@ impl<O> NamespaceData<O> {
table_count,
partition_provider,
post_write_observer,
+ partition_count: Arc::new(partition_counter),
}
}
@@ -141,7 +153,7 @@ impl<O> DmlSink for NamespaceData<O>
where
O: PostWriteObserver,
{
- type Error = mutable_batch::Error;
+ type Error = BufferWriteError;
async fn apply(&self, op: IngestOp) -> Result<(), Self::Error> {
match op {
@@ -160,6 +172,7 @@ where
self.namespace_id,
Arc::clone(&self.namespace_name),
Arc::clone(&self.partition_provider),
+ Arc::clone(&self.partition_count),
Arc::clone(&self.post_write_observer),
))
});
@@ -218,7 +231,7 @@ where
#[cfg(test)]
mod tests {
- use std::sync::Arc;
+ use std::{num::NonZeroUsize, sync::Arc};
use metric::{Attributes, Metric};
@@ -251,6 +264,7 @@ mod tests {
defer_namespace_name_1_ms(),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ PartitionCounter::new(NonZeroUsize::new(usize::MAX).unwrap()),
Arc::new(MockPostWriteObserver::default()),
&metrics,
);
diff --git a/ingester/src/buffer_tree/partition.rs b/ingester/src/buffer_tree/partition.rs
index a2d73887e9..3040968380 100644
--- a/ingester/src/buffer_tree/partition.rs
+++ b/ingester/src/buffer_tree/partition.rs
@@ -21,6 +21,7 @@ use crate::{
};
mod buffer;
+pub(crate) mod counter;
pub(crate) mod persisting;
mod persisting_list;
pub(crate) mod resolver;
diff --git a/ingester/src/buffer_tree/partition/counter.rs b/ingester/src/buffer_tree/partition/counter.rs
new file mode 100644
index 0000000000..4efa09e6a8
--- /dev/null
+++ b/ingester/src/buffer_tree/partition/counter.rs
@@ -0,0 +1,66 @@
+use std::{
+ num::NonZeroUsize,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+
+/// A counter typed for counting partitions and applying a configured limit.
+///
+/// No ordering is guaranteed - increments and reads may be arbitrarily
+/// reordered w.r.t other memory operations (relaxed ordering).
+#[derive(Debug)]
+pub(crate) struct PartitionCounter {
+ current: AtomicUsize,
+ max: usize,
+}
+
+impl PartitionCounter {
+ pub(crate) fn new(max: NonZeroUsize) -> Self {
+ Self {
+ current: AtomicUsize::new(0),
+ max: max.get(),
+ }
+ }
+
+ /// Increment the counter by 1.
+ pub(crate) fn inc(&self) {
+ self.current.fetch_add(1, Ordering::Relaxed);
+ }
+
+ /// Read the approximate counter value.
+ ///
+ /// Reads may return stale values, but will always be monotonic.
+ pub(crate) fn read(&self) -> usize {
+ self.current.load(Ordering::Relaxed)
+ }
+
+ /// Return `true` if the configured limit has been reached.
+ pub(crate) fn is_maxed(&self) -> bool {
+ self.read() >= self.max
+ }
+
+ #[cfg(test)]
+ pub(crate) fn set(&self, v: usize) {
+ self.current.store(v, Ordering::Relaxed)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_counter() {
+ const N: usize = 100;
+
+ let c = PartitionCounter::new(NonZeroUsize::new(N).unwrap());
+
+ for v in 0..N {
+ assert!(!c.is_maxed());
+ assert_eq!(c.read(), v);
+ c.inc();
+ }
+
+ assert_eq!(c.read(), N);
+ assert!(c.is_maxed());
+ }
+}
diff --git a/ingester/src/buffer_tree/root.rs b/ingester/src/buffer_tree/root.rs
index 2b0c11a748..6c001d9f90 100644
--- a/ingester/src/buffer_tree/root.rs
+++ b/ingester/src/buffer_tree/root.rs
@@ -1,15 +1,16 @@
-use std::{fmt::Debug, sync::Arc};
+use std::{fmt::Debug, num::NonZeroUsize, sync::Arc};
use async_trait::async_trait;
use data_types::{NamespaceId, TableId};
use metric::U64Counter;
use parking_lot::Mutex;
use predicate::Predicate;
+use thiserror::Error;
use trace::span::Span;
use super::{
namespace::{name_resolver::NamespaceNameProvider, NamespaceData},
- partition::{resolver::PartitionProvider, PartitionData},
+ partition::{counter::PartitionCounter, resolver::PartitionProvider, PartitionData},
post_write::PostWriteObserver,
table::metadata_resolver::TableProvider,
};
@@ -24,6 +25,16 @@ use crate::{
},
};
+/// An error buffering a write into the [`BufferTree`].
+#[derive(Debug, Error)]
+pub enum BufferWriteError {
+ #[error("namespace reached buffered partition limit ({count} partitions at once)")]
+ PartitionLimit { count: usize },
+
+ #[error(transparent)]
+ Write(#[from] mutable_batch::Error),
+}
+
/// A [`BufferTree`] is the root of an in-memory tree of many [`NamespaceData`]
/// containing one or more child [`TableData`] nodes, which in turn contain one
/// or more [`PartitionData`] nodes:
@@ -84,6 +95,9 @@ pub(crate) struct BufferTree<O> {
///
/// [`PartitionData`]: super::partition::PartitionData
partition_provider: Arc<dyn PartitionProvider>,
+ /// The maximum number of partitions that may be buffered for a single
+ /// namespace at any time.
+ max_partitions_per_namespace: NonZeroUsize,
/// A set of namespaces this [`BufferTree`] instance has processed
/// [`IngestOp`]'s for.
@@ -118,6 +132,7 @@ where
namespace_name_resolver: Arc<dyn NamespaceNameProvider>,
table_resolver: Arc<dyn TableProvider>,
partition_provider: Arc<dyn PartitionProvider>,
+ max_partitions_per_namespace: NonZeroUsize,
post_write_observer: Arc<O>,
metrics: Arc<metric::Registry>,
) -> Self {
@@ -134,6 +149,7 @@ where
table_resolver,
metrics,
partition_provider,
+ max_partitions_per_namespace,
post_write_observer,
namespace_count,
}
@@ -170,7 +186,7 @@ impl<O> DmlSink for BufferTree<O>
where
O: PostWriteObserver,
{
- type Error = mutable_batch::Error;
+ type Error = BufferWriteError;
async fn apply(&self, op: IngestOp) -> Result<(), Self::Error> {
let namespace_id = op.namespace();
@@ -184,6 +200,7 @@ where
Arc::new(self.namespace_name_resolver.for_namespace(namespace_id)),
Arc::clone(&self.table_resolver),
Arc::clone(&self.partition_provider),
+ PartitionCounter::new(self.max_partitions_per_namespace),
Arc::clone(&self.post_write_observer),
&self.metrics,
))
@@ -302,6 +319,7 @@ mod tests {
defer_namespace_name_1_ms(),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ PartitionCounter::new(NonZeroUsize::new(42).unwrap()),
Arc::new(MockPostWriteObserver::default()),
&metrics,
);
@@ -361,6 +379,7 @@ mod tests {
$name:ident,
$(table_provider = $table_provider:expr,)? // An optional table provider
$(projection = $projection:expr,)? // An optional OwnedProjection
+ $(partition_limit = $partition_limit:literal,)? // An optional partition count to limit namespaces to
partitions = [$($partition:expr), +], // The set of PartitionData for the mock
// partition provider
writes = [$($write:expr), *], // The set of WriteOperation to apply()
@@ -387,11 +406,18 @@ mod tests {
let table_provider: Arc<dyn TableProvider> = $table_provider;
)?
+ #[allow(unused_variables)]
+ let partition_count_limit = usize::MAX;
+ $(
+ let partition_count_limit = $partition_limit;
+ )?
+
// Init the buffer tree
let buf = BufferTree::new(
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
table_provider,
partition_provider,
+ NonZeroUsize::new(partition_count_limit).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::new(metric::Registry::default()),
);
@@ -805,6 +831,78 @@ mod tests {
]
);
+ // Apply a partition limit and write N+1 partitions across multiple tables
+ // to the same namespace, ensuring the limit is enforced.
+ //
+ // This validates partition counts are tracked across tables for a given
+ // namespace, and effectively enforced.
+ #[tokio::test]
+ async fn test_write_query_partition_limit_enforced() {
+ maybe_start_logging();
+
+ let partition_provider = Arc::new(
+ MockPartitionProvider::default()
+ .with_partition(
+ PartitionDataBuilder::new()
+ .with_partition_key(ARBITRARY_PARTITION_KEY.clone())
+ .build(),
+ )
+ .with_partition(
+ PartitionDataBuilder::new()
+ .with_partition_key(PARTITION2_KEY.clone())
+ .build(),
+ ),
+ );
+
+ let table_provider = Arc::clone(&*ARBITRARY_TABLE_PROVIDER);
+ let partition_count_limit = 1;
+
+ let buf = BufferTree::new(
+ Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
+ table_provider,
+ partition_provider,
+ NonZeroUsize::new(partition_count_limit).unwrap(),
+ Arc::new(MockPostWriteObserver::default()),
+ Arc::new(metric::Registry::default()),
+ );
+
+ // Write to the first partition should succeed
+ buf.apply(IngestOp::Write(make_write_op(
+ &ARBITRARY_PARTITION_KEY,
+ ARBITRARY_NAMESPACE_ID,
+ &ARBITRARY_TABLE_NAME,
+ ARBITRARY_TABLE_ID,
+ 0,
+ format!(
+ r#"{},region=Asturias temp=35 4242424242"#,
+ &*ARBITRARY_TABLE_NAME
+ )
+ .as_str(),
+ None,
+ )))
+ .await
+ .expect("failed to perform first write");
+
+ // Second write to a second table should hit the limit and be rejected
+ let err = buf
+ .apply(IngestOp::Write(make_write_op(
+ &PARTITION2_KEY,
+ ARBITRARY_NAMESPACE_ID,
+ "BANANASareDIFFERENT",
+ TableId::new(ARBITRARY_TABLE_ID.get() + 1),
+ 0,
+ "BANANASareDIFFERENT,region=Asturias temp=35 4242424242",
+ None,
+ )))
+ .await
+ .expect_err("limit should be enforced");
+
+ assert_matches!(err, BufferWriteError::PartitionLimit { count: 1 });
+
+ // Only one partition should exist (second was rejected)
+ assert_eq!(buf.partitions().count(), 1);
+ }
+
/// Ensure partition pruning during query execution also prunes metadata
/// frames.
///
@@ -840,6 +938,7 @@ mod tests {
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
table_provider,
partition_provider,
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::new(metric::Registry::default()),
);
@@ -930,6 +1029,7 @@ mod tests {
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::clone(&metrics),
);
@@ -1024,6 +1124,7 @@ mod tests {
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::clone(&Arc::new(metric::Registry::default())),
);
@@ -1114,6 +1215,7 @@ mod tests {
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::new(metric::Registry::default()),
);
@@ -1215,6 +1317,7 @@ mod tests {
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::new(metric::Registry::default()),
);
@@ -1330,6 +1433,7 @@ mod tests {
Arc::new(MockNamespaceNameProvider::new(&**ARBITRARY_NAMESPACE_NAME)),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
partition_provider,
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::new(metric::Registry::default()),
);
diff --git a/ingester/src/buffer_tree/table.rs b/ingester/src/buffer_tree/table.rs
index 1cc05d6feb..c1c33d154b 100644
--- a/ingester/src/buffer_tree/table.rs
+++ b/ingester/src/buffer_tree/table.rs
@@ -25,8 +25,9 @@ use self::metadata::TableMetadata;
use super::{
namespace::NamespaceName,
- partition::{resolver::PartitionProvider, PartitionData},
+ partition::{counter::PartitionCounter, resolver::PartitionProvider, PartitionData},
post_write::PostWriteObserver,
+ BufferWriteError,
};
use crate::{
arcmap::ArcMap,
@@ -38,7 +39,9 @@ use crate::{
query_adaptor::QueryAdaptor,
};
-/// Data of a Table in a given Namesapce
+const MAX_NAMESPACE_PARTITION_COUNT: usize = usize::MAX;
+
+/// Data of a Table in a given Namespace
#[derive(Debug)]
pub(crate) struct TableData<O> {
table_id: TableId,
@@ -55,6 +58,15 @@ pub(crate) struct TableData<O> {
// Map of partition key to its data
partition_data: ArcMap<PartitionKey, Mutex<PartitionData>>,
+ /// A counter tracking the approximate number of partitions currently
+ /// buffered.
+ ///
+ /// This counter is NOT atomically incremented w.r.t creation of the
+ /// partitions it tracks, and therefore is susceptible to "overrun",
+ /// breaching the configured partition count limit by a relatively small
+ /// degree.
+ partition_count: Arc<PartitionCounter>,
+
post_write_observer: Arc<O>,
}
@@ -70,6 +82,7 @@ impl<O> TableData<O> {
namespace_id: NamespaceId,
namespace_name: Arc<DeferredLoad<NamespaceName>>,
partition_provider: Arc<dyn PartitionProvider>,
+ partition_count: Arc<PartitionCounter>,
post_write_observer: Arc<O>,
) -> Self {
Self {
@@ -79,6 +92,7 @@ impl<O> TableData<O> {
namespace_name,
partition_data: Default::default(),
partition_provider,
+ partition_count,
post_write_observer,
}
}
@@ -127,10 +141,19 @@ where
sequence_number: SequenceNumber,
batch: MutableBatch,
partition_key: PartitionKey,
- ) -> Result<(), mutable_batch::Error> {
+ ) -> Result<(), BufferWriteError> {
let p = self.partition_data.get(&partition_key);
let partition_data = match p {
Some(p) => p,
+ None if self.partition_count.is_maxed() => {
+ // This namespace has exceeded the upper bound on partitions.
+ //
+ // This counter is approximate, but monotonic - the count may be
+ // over the desired limit.
+ return Err(BufferWriteError::PartitionLimit {
+ count: self.partition_count.read(),
+ });
+ }
None => {
let p = self
.partition_provider
@@ -146,7 +169,10 @@ where
//
// This MAY return a different instance than `p` if another
// thread has already initialised the partition.
- self.partition_data.get_or_insert_with(&partition_key, || p)
+ self.partition_data.get_or_insert_with(&partition_key, || {
+ self.partition_count.inc();
+ p
+ })
}
};
@@ -355,8 +381,9 @@ fn keep_after_pruning_partition_key(
#[cfg(test)]
mod tests {
- use std::sync::Arc;
+ use std::{num::NonZeroUsize, sync::Arc};
+ use assert_matches::assert_matches;
use mutable_batch_lp::lines_to_batches;
use super::*;
@@ -379,12 +406,15 @@ mod tests {
MockPartitionProvider::default().with_partition(PartitionDataBuilder::new().build()),
);
+ let partition_counter = Arc::new(PartitionCounter::new(NonZeroUsize::new(42).unwrap()));
+
let table = TableData::new(
ARBITRARY_TABLE_ID,
defer_table_metadata_1_sec(),
ARBITRARY_NAMESPACE_ID,
defer_namespace_name_1_sec(),
partition_provider,
+ Arc::clone(&partition_counter),
Arc::new(MockPostWriteObserver::default()),
);
@@ -411,5 +441,59 @@ mod tests {
// Referencing the partition should succeed
assert!(table.partition_data.get(&ARBITRARY_PARTITION_KEY).is_some());
+
+ // The partition should have been recorded in the partition count.
+ assert_eq!(partition_counter.read(), 1);
+ }
+
+ /// Ensure the partition limit is respected.
+ #[tokio::test]
+ async fn test_partition_limit() {
+ // Configure the mock partition provider to return a partition for a table ID.
+ let partition_provider = Arc::new(
+ MockPartitionProvider::default().with_partition(PartitionDataBuilder::new().build()),
+ );
+
+ const N: usize = 42;
+
+ // Configure a counter that has already reached the maximum limit.
+ let partition_counter = Arc::new(PartitionCounter::new(NonZeroUsize::new(N).unwrap()));
+ partition_counter.set(N);
+
+ let table = TableData::new(
+ ARBITRARY_TABLE_ID,
+ defer_table_metadata_1_sec(),
+ ARBITRARY_NAMESPACE_ID,
+ defer_namespace_name_1_sec(),
+ partition_provider,
+ Arc::clone(&partition_counter),
+ Arc::new(MockPostWriteObserver::default()),
+ );
+
+ let batch = lines_to_batches(
+ &format!(r#"{},bat=man value=24 42"#, &*ARBITRARY_TABLE_NAME),
+ 0,
+ )
+ .unwrap()
+ .remove(&***ARBITRARY_TABLE_NAME)
+ .unwrap();
+
+ // Write some test data
+ let err = table
+ .buffer_table_write(
+ SequenceNumber::new(42),
+ batch,
+ ARBITRARY_PARTITION_KEY.clone(),
+ )
+ .await
+ .expect_err("buffer op should hit partition limit");
+
+ assert_matches!(err, BufferWriteError::PartitionLimit { count: N });
+
+ // The partition should not have been created
+ assert_eq!(table.partition_data.values().len(), 0);
+
+ // The partition counter should be unchanged
+ assert_eq!(partition_counter.read(), N);
}
}
diff --git a/ingester/src/dml_sink/trait.rs b/ingester/src/dml_sink/trait.rs
index b72dab6cba..dee1043bb2 100644
--- a/ingester/src/dml_sink/trait.rs
+++ b/ingester/src/dml_sink/trait.rs
@@ -1,6 +1,6 @@
use std::{error::Error, fmt::Debug, ops::Deref, sync::Arc};
-use crate::dml_payload::IngestOp;
+use crate::{buffer_tree::BufferWriteError, dml_payload::IngestOp};
use async_trait::async_trait;
use thiserror::Error;
@@ -11,7 +11,7 @@ pub enum DmlError {
///
/// [`BufferTree`]: crate::buffer_tree::BufferTree
#[error("failed to buffer op: {0}")]
- Buffer(#[from] mutable_batch::Error),
+ Buffer(#[from] BufferWriteError),
/// An error appending the [`IngestOp`] to the write-ahead log.
#[error("wal commit failure: {0}")]
diff --git a/ingester/src/init.rs b/ingester/src/init.rs
index 2f18d87e2b..746c6a09cc 100644
--- a/ingester/src/init.rs
+++ b/ingester/src/init.rs
@@ -9,7 +9,7 @@ pub use wal_replay::*;
mod graceful_shutdown;
mod wal_replay;
-use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration};
+use std::{net::SocketAddr, num::NonZeroUsize, path::PathBuf, sync::Arc, time::Duration};
use arrow_flight::flight_service_server::FlightService;
use backoff::BackoffConfig;
@@ -278,6 +278,7 @@ pub async fn new<F>(
persist_hot_partition_cost: usize,
object_store: ParquetStorage,
gossip: GossipConfig,
+ max_partitions_per_namespace: NonZeroUsize,
shutdown: F,
) -> Result<IngesterGuard<impl IngesterRpcInterface>, InitError>
where
@@ -430,6 +431,7 @@ where
namespace_name_provider,
table_provider,
partition_provider,
+ max_partitions_per_namespace,
Arc::new(hot_partition_persister),
Arc::clone(&metrics),
));
diff --git a/ingester/src/persist/handle.rs b/ingester/src/persist/handle.rs
index 36a7ee3bab..30f20e5597 100644
--- a/ingester/src/persist/handle.rs
+++ b/ingester/src/persist/handle.rs
@@ -478,7 +478,7 @@ impl<T> Drop for AbortOnDrop<T> {
#[cfg(test)]
mod tests {
- use std::{sync::Arc, task::Poll, time::Duration};
+ use std::{num::NonZeroUsize, sync::Arc, task::Poll, time::Duration};
use assert_matches::assert_matches;
use data_types::SortedColumnSet;
@@ -525,6 +525,7 @@ mod tests {
.build(),
),
),
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Default::default(),
);
diff --git a/ingester/src/persist/mod.rs b/ingester/src/persist/mod.rs
index 9b042c65b4..f2acf818c5 100644
--- a/ingester/src/persist/mod.rs
+++ b/ingester/src/persist/mod.rs
@@ -13,7 +13,7 @@ mod worker;
#[cfg(test)]
mod tests {
- use std::{sync::Arc, time::Duration};
+ use std::{num::NonZeroUsize, sync::Arc, time::Duration};
use assert_matches::assert_matches;
use data_types::{CompactionLevel, ParquetFile, SortedColumnSet};
@@ -69,6 +69,7 @@ mod tests {
Arc::clone(&*ARBITRARY_NAMESPACE_NAME_PROVIDER),
Arc::clone(&*ARBITRARY_TABLE_PROVIDER),
Arc::new(CatalogPartitionResolver::new(Arc::clone(&catalog))),
+ NonZeroUsize::new(usize::MAX).unwrap(),
Arc::new(MockPostWriteObserver::default()),
Arc::new(metric::Registry::default()),
);
diff --git a/ingester/src/server/grpc/rpc_write.rs b/ingester/src/server/grpc/rpc_write.rs
index de461dcb94..e008ccb049 100644
--- a/ingester/src/server/grpc/rpc_write.rs
+++ b/ingester/src/server/grpc/rpc_write.rs
@@ -15,6 +15,7 @@ use trace::{
};
use crate::{
+ buffer_tree::BufferWriteError,
dml_payload::write::{PartitionedData, TableData, WriteOperation},
dml_payload::IngestOp,
dml_sink::{DmlError, DmlSink},
@@ -66,7 +67,10 @@ impl From<RpcError> for tonic::Status {
impl From<DmlError> for tonic::Status {
fn from(e: DmlError) -> Self {
match e {
- DmlError::Buffer(e) => map_write_error(e),
+ DmlError::Buffer(BufferWriteError::PartitionLimit { .. }) => {
+ Self::resource_exhausted(e.to_string())
+ }
+ DmlError::Buffer(BufferWriteError::Write(e)) => map_write_error(e),
DmlError::Wal(_) => Self::internal(e.to_string()),
DmlError::ApplyTimeout => Self::internal(e.to_string()),
}
diff --git a/ingester_test_ctx/src/lib.rs b/ingester_test_ctx/src/lib.rs
index 6fa73bdbde..57cba40b4b 100644
--- a/ingester_test_ctx/src/lib.rs
+++ b/ingester_test_ctx/src/lib.rs
@@ -17,7 +17,7 @@
// Workaround for "unused crate" lint false positives.
use workspace_hack as _;
-use std::{collections::HashMap, sync::Arc, time::Duration};
+use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, time::Duration};
use arrow::record_batch::RecordBatch;
use arrow_flight::{decode::FlightRecordBatchStream, flight_service_server::FlightService, Ticket};
@@ -169,6 +169,7 @@ impl TestContextBuilder {
persist_hot_partition_cost,
storage.clone(),
GossipConfig::default(),
+ NonZeroUsize::new(usize::MAX).unwrap(),
shutdown_rx.map(|v| v.expect("shutdown sender dropped without calling shutdown")),
)
.await
diff --git a/ioxd_ingester/src/lib.rs b/ioxd_ingester/src/lib.rs
index 354d4c5492..66c7caabdd 100644
--- a/ioxd_ingester/src/lib.rs
+++ b/ioxd_ingester/src/lib.rs
@@ -40,6 +40,7 @@ use metric::Registry;
use parquet_file::storage::ParquetStorage;
use std::{
fmt::{Debug, Display},
+ num::NonZeroUsize,
sync::{Arc, Mutex},
time::Duration,
};
@@ -230,6 +231,9 @@ pub async fn create_ingester_server_type(
ingester_config.persist_hot_partition_cost,
object_store,
gossip,
+ ingester_config
+ .max_partitions_per_namespace
+ .unwrap_or_else(|| NonZeroUsize::new(usize::MAX).unwrap()),
shutdown_rx.map(|v| v.expect("shutdown sender dropped without calling shutdown")),
)
.await?;
|
8fec1d636e82720389d06355d93245a06a8d90ad
|
Michael Gattozzi
|
2024-02-27 11:57:10
|
Add write_lp partial write, name check, and precision (#24677)
|
* feat: Add partial write and name check to write_lp
This commit adds new behavior to the v3 write_lp HTTP endpoint by
implementing both partial writes and checking the db name for validity.
It also makes partial writes the default behavior now, whereas before
we would reject the entire request if a single line was incorrect.
Users who *do* want that all-or-nothing behavior can now opt in by
putting 'accept_partial=false' into the URL of the request.
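As a rough sketch, opting back into the old all-or-nothing behavior from
the Rust client looks like this (the builder methods are the ones this
change adds to influxdb3_client; write_strict itself is just a
hypothetical helper, with errors unwrapped as in the tests):

async fn write_strict(host: &str, db: &str, lp: &'static str) {
    // accept_partial defaults to true: valid lines are kept and the bad
    // ones are reported back. Passing false rejects the whole request.
    let client = influxdb3_client::Client::new(host).unwrap();
    client
        .api_v3_write_lp(db)
        .accept_partial(false)
        .body(lp)
        .send()
        .await
        .unwrap();
}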
We also check that the db name used in the request contains only
numbers, letters, underscores, and hyphens, and that it starts with
either a number or a letter.
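For illustration only, the rule amounts to a check like the standalone
sketch below (the server's real implementation is validate_db_name in
the influxdb3_server diff further down, which also rejects multi-byte
characters explicitly):

fn db_name_ok(name: &str) -> bool {
    let mut chars = name.chars();
    // the first character must be an ASCII letter or digit
    match chars.next() {
        Some(c) if c.is_ascii_alphanumeric() => {}
        _ => return false,
    }
    // the rest may also use underscores and hyphens; non-ASCII fails here
    chars.all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-')
}

fn main() {
    assert!(db_name_ok("metrics-01"));
    assert!(!db_name_ok("_metrics")); // must start with a letter or number
    assert!(!db_name_ok("my/db")); // '/' is not an allowed character
}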
We also introduce a more standardized way to return errors to the user
as JSON, which we can expand over time to give actionable error messages
that users can apply to fix their requests.
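Concretely, a rejected write now comes back as a 400 whose body looks
like the one asserted in the tests further down, for example:

{"error":"parsing failed for write_lp endpoint","data":{"original_line":"cpu,host=a val= 123","line_number":1,"error_message":"No fields were provided"}}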
Finally, tests have been included to mock out and exercise the behavior
for all of the above, so that changes to the error messages are reflected
in tests, that both partial and non-partial writes work as expected, and
that invalid db names are rejected without writing.
* feat: Add precision to write_lp http endpoint
This commit adds the ability to control the precision of the timestamps
passed in to the endpoint. For example, if a user chooses 'second' and
the timestamp 20, that will be 20 seconds past the Unix Epoch. If they
choose 'millisecond' instead, it will be 20 milliseconds past the Epoch.
Up to this point we assumed that all data passed in was of nanosecond
precision. The data is still stored in the database as nanoseconds.
Instead, upon receiving the data we convert it to nanoseconds. If the
precision URL parameter is not specified, we default to auto and take a
best-effort guess at what the user wanted based on the order of
magnitude of the data passed in.
This change gives users finer-grained control over what precision they
want to use for their data, while doing our best to provide a good user
experience: things work as expected, and we avoid the failure mode where
a user meant seconds but got nanoseconds by default.
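To make the conversion concrete, the sketch below mirrors the logic this
change adds (the real code is the Precision enum plus guess_precision and
validate_and_convert_parsed_line in the influxdb3_write diff further down;
the string-keyed to_nanos helper here is purely illustrative):

fn to_nanos(ts: i64, precision: &str) -> i64 {
    let multiplier = match precision {
        "second" => 1_000_000_000,
        "millisecond" => 1_000_000,
        "microsecond" => 1_000,
        "nanosecond" => 1,
        // "auto": guess from the order of magnitude of the value itself
        _ => match ts.abs() / 1_000_000_000 {
            0..=4 => 1_000_000_000,     // small enough to be seconds
            5..=4_999 => 1_000_000,     // likely milliseconds
            5_000..=4_999_999 => 1_000, // likely microseconds
            _ => 1,                     // already nanoseconds
        },
    };
    ts * multiplier
}

fn main() {
    // 20 at 'second' precision is 20 seconds past the Unix Epoch...
    assert_eq!(to_nanos(20, "second"), 20_000_000_000);
    // ...and 20 at 'millisecond' precision is 20 milliseconds past it.
    assert_eq!(to_nanos(20, "millisecond"), 20_000_000);
}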
| null |
feat: Add write_lp partial write, name check, and precision (#24677)
* feat: Add partial write and name check to write_lp
This commit adds new behavior to the v3 write_lp HTTP endpoint by
implementing both partial writes and checking the db name for validity.
It also makes partial writes the default behavior now, whereas before
we would reject the entire request if a single line was incorrect.
Users who *do* want that all-or-nothing behavior can now opt in by
putting 'accept_partial=false' into the URL of the request.
We also check that the db name used in the request contains only
numbers, letters, underscores, and hyphens, and that it starts with
either a number or a letter.
We also introduce a more standardized way to return errors to the user
as JSON, which we can expand over time to give actionable error messages
that users can apply to fix their requests.
Finally, tests have been included to mock out and exercise the behavior
for all of the above, so that changes to the error messages are reflected
in tests, that both partial and non-partial writes work as expected, and
that invalid db names are rejected without writing.
* feat: Add precision to write_lp http endpoint
This commit adds the ability to control the precision of the timestamps
passed in to the endpoint. For example, if a user chooses 'second' and
the timestamp 20, that will be 20 seconds past the Unix Epoch. If they
choose 'millisecond' instead, it will be 20 milliseconds past the Epoch.
Up to this point we assumed that all data passed in was of nanosecond
precision. The data is still stored in the database as nanoseconds.
Instead, upon receiving the data we convert it to nanoseconds. If the
precision URL parameter is not specified, we default to auto and take a
best-effort guess at what the user wanted based on the order of
magnitude of the data passed in.
This change gives users finer-grained control over what precision they
want to use for their data, while doing our best to provide a good user
experience: things work as expected, and we avoid the failure mode where
a user meant seconds but got nanoseconds by default.
|
diff --git a/Cargo.lock b/Cargo.lock
index a384adc017..449bc0a7ea 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -90,9 +90,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstream"
-version = "0.6.11"
+version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5"
+checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -138,9 +138,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.79"
+version = "1.0.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
+checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1"
[[package]]
name = "arc-swap"
@@ -418,7 +418,7 @@ dependencies = [
"proptest",
"rand",
"regex",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"uuid",
"workspace-hack",
]
@@ -461,7 +461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3"
dependencies = [
"concurrent-queue",
- "event-listener 5.0.0",
+ "event-listener 5.1.0",
"event-listener-strategy",
"futures-core",
"pin-project-lite",
@@ -513,7 +513,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -524,7 +524,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -561,7 +561,7 @@ dependencies = [
"observability_deps",
"parking_lot 0.12.1",
"paste",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers_end_to_end",
"tokio",
"tonic 0.10.2",
@@ -625,7 +625,7 @@ version = "0.1.0"
dependencies = [
"observability_deps",
"rand",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"workspace-hack",
]
@@ -737,9 +737,9 @@ dependencies = [
[[package]]
name = "bstr"
-version = "1.9.0"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc"
+checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706"
dependencies = [
"memchr",
"regex-automata 0.4.5",
@@ -748,9 +748,9 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.14.0"
+version = "3.15.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
+checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b"
[[package]]
name = "bytecount"
@@ -867,7 +867,7 @@ dependencies = [
"futures",
"hyper",
"reqwest",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"tokio-util",
"url",
@@ -876,11 +876,10 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.83"
+version = "1.0.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
+checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc"
dependencies = [
- "jobserver",
"libc",
]
@@ -902,7 +901,7 @@ dependencies = [
"num-traits",
"serde",
"wasm-bindgen",
- "windows-targets 0.52.0",
+ "windows-targets 0.52.3",
]
[[package]]
@@ -981,7 +980,7 @@ dependencies = [
"object_store",
"observability_deps",
"parquet_cache",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"sysinfo",
"tempfile",
"test_helpers",
@@ -1013,7 +1012,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -1362,14 +1361,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
name = "darling"
-version = "0.20.5"
+version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8"
+checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391"
dependencies = [
"darling_core",
"darling_macro",
@@ -1377,27 +1376,27 @@ dependencies = [
[[package]]
name = "darling_core"
-version = "0.20.5"
+version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04e48a959bcd5c761246f5d090ebc2fbf7b9cd527a492b07a67510c108f1e7e3"
+checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
name = "darling_macro"
-version = "0.20.5"
+version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77"
+checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
dependencies = [
"darling_core",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -1438,7 +1437,7 @@ dependencies = [
"serde_json",
"sha2",
"siphasher 1.0.0",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"sqlx",
"test_helpers",
"thiserror",
@@ -1765,9 +1764,9 @@ checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
[[package]]
name = "dyn-clone"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d"
+checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]]
name = "ed25519"
@@ -1861,9 +1860,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
[[package]]
name = "event-listener"
-version = "5.0.0"
+version = "5.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b72557800024fabbaa2449dd4bf24e37b93702d457a4d4f2b0dd1f0f039f20c1"
+checksum = "b7ad6fd685ce13acd6d9541a30f6db6567a7a24c9ffd4ba2955d29e3f22c8b27"
dependencies = [
"concurrent-queue",
"parking",
@@ -1876,7 +1875,7 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291"
dependencies = [
- "event-listener 5.0.0",
+ "event-listener 5.1.0",
"pin-project-lite",
]
@@ -1891,7 +1890,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"pin-project",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"tokio-util",
"tokio_metrics_bridge",
@@ -1980,7 +1979,7 @@ dependencies = [
"observability_deps",
"once_cell",
"prost 0.12.3",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"workspace-hack",
]
@@ -2086,7 +2085,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -2239,9 +2238,9 @@ dependencies = [
[[package]]
name = "half"
-version = "2.3.1"
+version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872"
+checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e"
dependencies = [
"cfg-if",
"crunchy",
@@ -2326,9 +2325,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
-version = "0.3.6"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd"
+checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60"
[[package]]
name = "hex"
@@ -2567,7 +2566,7 @@ dependencies = [
"log",
"nom",
"smallvec",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
]
@@ -2583,7 +2582,7 @@ dependencies = [
"reqwest",
"serde",
"serde_json",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"tokio",
"url",
@@ -2691,6 +2690,7 @@ dependencies = [
"parquet",
"parquet_file",
"pin-project-lite",
+ "pretty_assertions",
"schema",
"serde",
"serde_json",
@@ -2709,6 +2709,7 @@ dependencies = [
"trace_exporters",
"trace_http",
"tracker",
+ "unicode-segmentation",
"urlencoding 1.3.3",
"workspace-hack",
]
@@ -2821,7 +2822,7 @@ name = "influxrpc_parser"
version = "0.1.0"
dependencies = [
"generated_types",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"sqlparser",
"workspace-hack",
]
@@ -2844,7 +2845,7 @@ dependencies = [
"prost-build",
"query_functions",
"serde",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tonic 0.10.2",
"tonic-build",
"workspace-hack",
@@ -2931,7 +2932,7 @@ dependencies = [
"rand",
"serde",
"siphasher 1.0.0",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"sqlx",
"sqlx-hotswap-pool",
"tempfile",
@@ -2966,7 +2967,7 @@ dependencies = [
"schema",
"serde",
"serde_json",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"tokio",
"toml",
@@ -3004,7 +3005,7 @@ dependencies = [
"query_functions",
"schema",
"serde",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"tokio",
"tokio-stream",
@@ -3058,7 +3059,7 @@ dependencies = [
"predicate",
"query_functions",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"tokio",
"workspace-hack",
@@ -3137,7 +3138,7 @@ dependencies = [
"serde_json",
"serde_urlencoded",
"service_grpc_testing",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"tokio-stream",
"tokio-util",
@@ -3162,7 +3163,7 @@ dependencies = [
"hyper",
"ioxd_common",
"metric",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio-util",
"trace",
"workspace-hack",
@@ -3218,15 +3219,6 @@ version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
-[[package]]
-name = "jobserver"
-version = "0.1.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6"
-dependencies = [
- "libc",
-]
-
[[package]]
name = "js-sys"
version = "0.3.68"
@@ -3373,7 +3365,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde_json",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -3668,12 +3660,12 @@ dependencies = [
[[package]]
name = "mockito"
-version = "1.2.0"
+version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8d3038e23466858569c2d30a537f691fa0d53b51626630ae08262943e3bbb8b"
+checksum = "031ec85a3f39370cc7663640077c38766fd32b03e6beb54e6e402d0454443f7f"
dependencies = [
"assert-json-diff",
- "futures",
+ "futures-core",
"hyper",
"log",
"rand",
@@ -3748,7 +3740,7 @@ dependencies = [
"proptest",
"rand",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"workspace-hack",
]
@@ -3764,7 +3756,7 @@ dependencies = [
"itertools 0.12.1",
"mutable_batch",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"workspace-hack",
]
@@ -3782,7 +3774,7 @@ dependencies = [
"mutable_batch_lp",
"partition",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"workspace-hack",
]
@@ -4044,7 +4036,7 @@ dependencies = [
"metric",
"object_store",
"pin-project",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"workspace-hack",
]
@@ -4118,7 +4110,7 @@ dependencies = [
"proc-macro2",
"proc-macro2-diagnostics",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -4294,7 +4286,7 @@ dependencies = [
"prost 0.12.3",
"rand",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"thiserror",
"thrift",
@@ -4317,7 +4309,7 @@ dependencies = [
"object_store",
"parquet_file",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"workspace-hack",
]
@@ -4463,7 +4455,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -4542,7 +4534,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -4580,9 +4572,9 @@ dependencies = [
[[package]]
name = "pkg-config"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb"
+checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
[[package]]
name = "platforms"
@@ -4635,7 +4627,7 @@ dependencies = [
"observability_deps",
"query_functions",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"sqlparser",
"test_helpers",
"workspace-hack",
@@ -4685,7 +4677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5"
dependencies = [
"proc-macro2",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -4705,7 +4697,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
"version_check",
"yansi 1.0.0-rc.1",
]
@@ -4777,7 +4769,7 @@ dependencies = [
"prost 0.12.3",
"prost-types 0.12.3",
"regex",
- "syn 2.0.48",
+ "syn 2.0.51",
"tempfile",
"which",
]
@@ -4805,7 +4797,7 @@ dependencies = [
"itertools 0.11.0",
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -4871,7 +4863,7 @@ dependencies = [
"regex",
"regex-syntax 0.8.2",
"schema",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tokio",
"workspace-hack",
]
@@ -5201,9 +5193,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "ryu"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
+checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "same-file"
@@ -5232,7 +5224,7 @@ dependencies = [
"indexmap 2.2.3",
"observability_deps",
"once_cell",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"workspace-hack",
]
@@ -5311,9 +5303,9 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.21"
+version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0"
+checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
dependencies = [
"serde",
]
@@ -5351,7 +5343,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -5399,9 +5391,9 @@ dependencies = [
[[package]]
name = "serde_yaml"
-version = "0.9.31"
+version = "0.9.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e"
+checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f"
dependencies = [
"indexmap 2.2.3",
"itoa",
@@ -5445,7 +5437,7 @@ dependencies = [
"serde",
"serde_json",
"service_common",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"test_helpers",
"tokio",
"tonic 0.10.2",
@@ -5591,11 +5583,11 @@ dependencies = [
[[package]]
name = "snafu"
-version = "0.8.0"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d342c51730e54029130d7dc9fd735d28c4cd360f1368c01981d4f03ff207f096"
+checksum = "5ed22871b3fe6eff9f1b48f6cbd54149ff8e9acd740dea9146092435f9c43bd3"
dependencies = [
- "snafu-derive 0.8.0",
+ "snafu-derive 0.8.1",
]
[[package]]
@@ -5612,14 +5604,14 @@ dependencies = [
[[package]]
name = "snafu-derive"
-version = "0.8.0"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "080c44971436b1af15d6f61ddd8b543995cf63ab8e677d46b00cc06f4ef267a0"
+checksum = "4651148226ec36010993fcba6c3381552e8463e9f3e337b75af202b0688b5274"
dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -5630,12 +5622,12 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
[[package]]
name = "socket2"
-version = "0.5.5"
+version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
+checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871"
dependencies = [
"libc",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -5692,7 +5684,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -5972,7 +5964,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -6017,9 +6009,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.48"
+version = "2.0.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
+checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c"
dependencies = [
"proc-macro2",
"quote",
@@ -6145,7 +6137,7 @@ dependencies = [
"regex",
"reqwest",
"serde_json",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"sqlx",
"tempfile",
"test_helpers",
@@ -6172,14 +6164,14 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
name = "thread_local"
-version = "1.1.7"
+version = "1.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
+checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
dependencies = [
"cfg-if",
"once_cell",
@@ -6300,7 +6292,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -6384,9 +6376,9 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.22.5"
+version = "0.22.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99e68c159e8f5ba8a28c4eb7b0c0c190d77bb479047ca713270048145a9ad28a"
+checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6"
dependencies = [
"indexmap 2.2.3",
"serde",
@@ -6464,7 +6456,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -6580,7 +6572,7 @@ dependencies = [
"futures",
"iox_time",
"observability_deps",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"thrift",
"tokio",
"trace",
@@ -6601,7 +6593,7 @@ dependencies = [
"observability_deps",
"parking_lot 0.12.1",
"pin-project",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"tower",
"trace",
"workspace-hack",
@@ -6627,7 +6619,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
@@ -6909,7 +6901,7 @@ dependencies = [
"observability_deps",
"parking_lot 0.12.1",
"prost 0.12.3",
- "snafu 0.8.0",
+ "snafu 0.8.1",
"snap",
"test_helpers",
"tokio",
@@ -6982,7 +6974,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
"wasm-bindgen-shared",
]
@@ -7016,7 +7008,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -7112,7 +7104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
"windows-core",
- "windows-targets 0.52.0",
+ "windows-targets 0.52.3",
]
[[package]]
@@ -7121,7 +7113,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
- "windows-targets 0.52.0",
+ "windows-targets 0.52.3",
]
[[package]]
@@ -7139,7 +7131,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
- "windows-targets 0.52.0",
+ "windows-targets 0.52.3",
]
[[package]]
@@ -7159,17 +7151,17 @@ dependencies = [
[[package]]
name = "windows-targets"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f"
dependencies = [
- "windows_aarch64_gnullvm 0.52.0",
- "windows_aarch64_msvc 0.52.0",
- "windows_i686_gnu 0.52.0",
- "windows_i686_msvc 0.52.0",
- "windows_x86_64_gnu 0.52.0",
- "windows_x86_64_gnullvm 0.52.0",
- "windows_x86_64_msvc 0.52.0",
+ "windows_aarch64_gnullvm 0.52.3",
+ "windows_aarch64_msvc 0.52.3",
+ "windows_i686_gnu 0.52.3",
+ "windows_i686_msvc 0.52.3",
+ "windows_x86_64_gnu 0.52.3",
+ "windows_x86_64_gnullvm 0.52.3",
+ "windows_x86_64_msvc 0.52.3",
]
[[package]]
@@ -7180,9 +7172,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6"
[[package]]
name = "windows_aarch64_msvc"
@@ -7192,9 +7184,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f"
[[package]]
name = "windows_i686_gnu"
@@ -7204,9 +7196,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb"
[[package]]
name = "windows_i686_msvc"
@@ -7216,9 +7208,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58"
[[package]]
name = "windows_x86_64_gnu"
@@ -7228,9 +7220,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614"
[[package]]
name = "windows_x86_64_gnullvm"
@@ -7240,9 +7232,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c"
[[package]]
name = "windows_x86_64_msvc"
@@ -7252,15 +7244,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
-version = "0.52.0"
+version = "0.52.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6"
[[package]]
name = "winnow"
-version = "0.6.0"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b1dbce9e90e5404c5a52ed82b1d13fc8cfbdad85033b6f57546ffd1265f8451"
+checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178"
dependencies = [
"memchr",
]
@@ -7354,7 +7346,7 @@ dependencies = [
"sqlx-sqlite",
"strum",
"syn 1.0.109",
- "syn 2.0.48",
+ "syn 2.0.51",
"thrift",
"tokio",
"tokio-stream",
@@ -7376,9 +7368,9 @@ dependencies = [
[[package]]
name = "xxhash-rust"
-version = "0.8.8"
+version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53be06678ed9e83edb1745eb72efc0bbcd7b5c3c35711a860906aed827a13d61"
+checksum = "927da81e25be1e1a2901d59b81b37dd2efd1fc9c9345a55007f09bf5a2d3ee03"
[[package]]
name = "xz2"
@@ -7427,7 +7419,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.48",
+ "syn 2.0.51",
]
[[package]]
diff --git a/influxdb3/tests/flight.rs b/influxdb3/tests/flight.rs
index 9bad90f0fe..2d15a4e170 100644
--- a/influxdb3/tests/flight.rs
+++ b/influxdb3/tests/flight.rs
@@ -2,6 +2,7 @@ use arrow::record_batch::RecordBatch;
use arrow_flight::{decode::FlightRecordBatchStream, sql::SqlInfo};
use arrow_util::assert_batches_sorted_eq;
use futures::TryStreamExt;
+use influxdb3_client::Precision;
use crate::common::TestServer;
@@ -18,6 +19,7 @@ async fn flight() {
"cpu,host=s1,region=us-east usage=0.9 1\n\
cpu,host=s1,region=us-east usage=0.89 2\n\
cpu,host=s1,region=us-east usage=0.85 3",
+ Precision::Nanosecond,
)
.await;
@@ -133,11 +135,17 @@ async fn flight() {
}
}
-async fn write_lp_to_db(server: &TestServer, database: &str, lp: &'static str) {
+async fn write_lp_to_db(
+ server: &TestServer,
+ database: &str,
+ lp: &'static str,
+ precision: Precision,
+) {
let client = influxdb3_client::Client::new(server.client_addr()).unwrap();
client
.api_v3_write_lp(database)
.body(lp)
+ .precision(precision)
.send()
.await
.unwrap();
diff --git a/influxdb3_client/src/lib.rs b/influxdb3_client/src/lib.rs
index 1d7b100614..c83104d7d0 100644
--- a/influxdb3_client/src/lib.rs
+++ b/influxdb3_client/src/lib.rs
@@ -86,7 +86,7 @@ impl Client {
/// let client = Client::new("http://localhost:8181")?;
/// client
/// .api_v3_write_lp("db_name")
- /// .precision(Precision::Milli)
+ /// .precision(Precision::Millisecond)
/// .accept_partial(true)
/// .body("cpu,host=s1 usage=0.5")
/// .send()
@@ -162,9 +162,9 @@ impl<'a, B> From<&'a WriteRequestBuilder<'a, B>> for WriteParams<'a> {
#[serde(rename_all = "snake_case")]
pub enum Precision {
Second,
- Milli,
- Micro,
- Nano,
+ Millisecond,
+ Microsecond,
+ Nanosecond,
}
/// Builder type for composing a request to `/api/v3/write_lp`
@@ -332,7 +332,7 @@ mod tests {
.mock("POST", "/api/v3/write_lp")
.match_header("Authorization", format!("Bearer {token}").as_str())
.match_query(Matcher::AllOf(vec![
- Matcher::UrlEncoded("precision".into(), "milli".into()),
+ Matcher::UrlEncoded("precision".into(), "millisecond".into()),
Matcher::UrlEncoded("db".into(), db.into()),
Matcher::UrlEncoded("accept_partial".into(), "true".into()),
]))
@@ -346,7 +346,7 @@ mod tests {
client
.api_v3_write_lp(db)
- .precision(Precision::Milli)
+ .precision(Precision::Millisecond)
.accept_partial(true)
.body(body)
.send()
diff --git a/influxdb3_server/Cargo.toml b/influxdb3_server/Cargo.toml
index 5c7aae45f2..d8bdd09f24 100644
--- a/influxdb3_server/Cargo.toml
+++ b/influxdb3_server/Cargo.toml
@@ -30,10 +30,10 @@ trace_http = { path = "../trace_http" }
tracker = { path = "../tracker" }
arrow = { workspace = true, features = ["prettyprint"] }
+arrow-csv = "49.0.0"
arrow-flight.workspace = true
arrow-json = "49.0.0"
arrow-schema = "49.0.0"
-arrow-csv = "49.0.0"
async-trait = "0.1"
chrono = "0.4"
datafusion = { workspace = true }
@@ -43,15 +43,16 @@ hex = "0.4.3"
hyper = "0.14"
parking_lot = "0.11.1"
pin-project-lite = "0.2"
-thiserror = "1.0"
-tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }
-tokio-util = { version = "0.7.9" }
-tonic = { workspace = true }
serde = { version = "1.0.197", features = ["derive"] }
serde_json = "1.0.114"
serde_urlencoded = "0.7.0"
sha2 = "0.10.8"
+thiserror = "1.0"
+tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }
+tokio-util = { version = "0.7.9" }
+tonic = { workspace = true }
tower = "0.4.13"
+unicode-segmentation = "1.11.0"
workspace-hack = { version = "0.1", path = "../workspace-hack" }
[dev-dependencies]
@@ -62,3 +63,4 @@ test_helpers_end_to_end = { path = "../test_helpers_end_to_end" }
http = "0.2.9"
hyper = "0.14"
urlencoding = "1.1"
+pretty_assertions = "1.4.0"
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index 1de02da081..31cb3474a8 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -14,10 +14,14 @@ use hyper::header::CONTENT_ENCODING;
use hyper::http::HeaderValue;
use hyper::{Body, Method, Request, Response, StatusCode};
use influxdb3_write::persister::TrackedMemoryArrowWriter;
+use influxdb3_write::write_buffer::Error as WriteBufferError;
+use influxdb3_write::BufferedWriteRequest;
+use influxdb3_write::Precision;
use influxdb3_write::WriteBuffer;
use iox_time::{SystemProvider, TimeProvider};
use observability_deps::tracing::{debug, error, info};
use serde::Deserialize;
+use serde::Serialize;
use sha2::Digest;
use sha2::Sha256;
use std::convert::Infallible;
@@ -26,6 +30,7 @@ use std::num::NonZeroI32;
use std::str::Utf8Error;
use std::sync::Arc;
use thiserror::Error;
+use unicode_segmentation::UnicodeSegmentation;
#[derive(Debug, Error)]
pub enum Error {
@@ -129,6 +134,17 @@ pub enum Error {
// Influxdb3 Write
#[error("serde json error: {0}")]
Influxdb3Write(#[from] influxdb3_write::Error),
+
+ // Invalid Start Character for a Database Name
+ #[error("db name did not start with a number or letter")]
+ DbNameInvalidStartChar,
+
+ // Invalid Character for a Database Name
+ #[error("db name must use ASCII letters, numbers, underscores and hyphens only")]
+ DbNameInvalidChar,
+
+ #[error("partial write of line protocol ocurred")]
+ PartialLpWrite(BufferedWriteRequest),
}
#[derive(Debug, Error)]
@@ -142,12 +158,57 @@ pub enum AuthorizationError {
}
impl Error {
- fn response(&self) -> Response<Body> {
- let body = Body::from(self.to_string());
- Response::builder()
- .status(StatusCode::INTERNAL_SERVER_ERROR)
- .body(body)
- .unwrap()
+ fn response(self) -> Response<Body> {
+ #[derive(Debug, Serialize)]
+ struct ErrorMessage<T: Serialize> {
+ error: String,
+ data: Option<T>,
+ }
+ match self {
+ Self::WriteBuffer(WriteBufferError::ParseError(err)) => {
+ let err = ErrorMessage {
+ error: "parsing failed for write_lp endpoint".into(),
+ data: Some(err),
+ };
+ let serialized = serde_json::to_string(&err).unwrap();
+ let body = Body::from(serialized);
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(body)
+ .unwrap()
+ }
+ Self::DbNameInvalidStartChar | Self::DbNameInvalidChar => {
+ let err: ErrorMessage<()> = ErrorMessage {
+ error: self.to_string(),
+ data: None,
+ };
+ let serialized = serde_json::to_string(&err).unwrap();
+ let body = Body::from(serialized);
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(body)
+ .unwrap()
+ }
+ Self::PartialLpWrite(data) => {
+ let err = ErrorMessage {
+ error: "partial write of line protocol ocurred".into(),
+ data: Some(data.invalid_lines),
+ };
+ let serialized = serde_json::to_string(&err).unwrap();
+ let body = Body::from(serialized);
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(body)
+ .unwrap()
+ }
+ _ => {
+ let body = Body::from(self.to_string());
+ Response::builder()
+ .status(StatusCode::INTERNAL_SERVER_ERROR)
+ .body(body)
+ .unwrap()
+ }
+ }
}
}
@@ -185,6 +246,7 @@ where
async fn write_lp(&self, req: Request<Body>) -> Result<Response<Body>> {
let query = req.uri().query().ok_or(Error::MissingWriteParams)?;
let params: WriteParams = serde_urlencoded::from_str(query)?;
+ validate_db_name(¶ms.db)?;
info!("write_lp to {}", params.db);
let body = self.read_body(req).await?;
@@ -195,11 +257,22 @@ where
// TODO: use the time provider
let default_time = SystemProvider::new().now().timestamp_nanos();
- self.write_buffer
- .write_lp(database, body, default_time)
+ let result = self
+ .write_buffer
+ .write_lp(
+ database,
+ body,
+ default_time,
+ params.accept_partial,
+ params.precision,
+ )
.await?;
- Ok(Response::new(Body::from("{}")))
+ if result.invalid_lines.is_empty() {
+ Ok(Response::new(Body::empty()))
+ } else {
+ Err(Error::PartialLpWrite(result))
+ }
}
async fn query_sql(&self, req: Request<Body>) -> Result<Response<Body>> {
@@ -429,6 +502,33 @@ where
}
}
+/// A valid name:
+/// - Starts with a letter or a number
+/// - Is ASCII not UTF-8
+/// - Contains only letters, numbers, underscores or hyphens
+fn validate_db_name(name: &str) -> Result<()> {
+ let mut is_first_char = true;
+ for grapheme in name.graphemes(true) {
+ if grapheme.as_bytes().len() > 1 {
+ // In the case of a unicode we need to handle multibyte chars
+ return Err(Error::DbNameInvalidChar);
+ }
+ let char = grapheme.as_bytes()[0] as char;
+ if !is_first_char {
+ if !(char.is_ascii_alphanumeric() || char == '_' || char == '-') {
+ return Err(Error::DbNameInvalidChar);
+ }
+ } else {
+ if !char.is_ascii_alphanumeric() {
+ return Err(Error::DbNameInvalidStartChar);
+ }
+ is_first_char = false;
+ }
+ }
+
+ Ok(())
+}
+
#[derive(Debug, Deserialize)]
pub(crate) struct QuerySqlParams {
pub(crate) db: String,
@@ -436,9 +536,17 @@ pub(crate) struct QuerySqlParams {
pub(crate) format: Option<String>,
}
+// This is a hack around the fact that bool default is false not true
+const fn true_fn() -> bool {
+ true
+}
#[derive(Debug, Deserialize)]
pub(crate) struct WriteParams {
pub(crate) db: String,
+ #[serde(default = "true_fn")]
+ pub(crate) accept_partial: bool,
+ #[serde(default)]
+ pub(crate) precision: Precision,
}
pub(crate) async fn route_request<W: WriteBuffer, Q: QueryExecutor>(
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs
index 302c977c7b..5bd5fa9dbe 100644
--- a/influxdb3_server/src/lib.rs
+++ b/influxdb3_server/src/lib.rs
@@ -225,12 +225,13 @@ pub async fn wait_for_signal() {
mod tests {
use crate::serve;
use datafusion::parquet::data_type::AsBytes;
- use hyper::{body, Body, Client, Request, Response};
+ use hyper::{body, Body, Client, Request, Response, StatusCode};
use influxdb3_write::persister::PersisterImpl;
use influxdb3_write::SegmentId;
use iox_query::exec::{Executor, ExecutorConfig};
use object_store::DynObjectStore;
use parquet_file::storage::{ParquetStorage, StorageId};
+ use pretty_assertions::assert_eq;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV4};
use std::num::NonZeroUsize;
@@ -300,7 +301,15 @@ mod tests {
tokio::spawn(async move { serve(server, frontend_shutdown).await });
let server = format!("http://{}", addr);
- write_lp(&server, "foo", "cpu,host=a val=1i 123", None).await;
+ write_lp(
+ &server,
+ "foo",
+ "cpu,host=a val=1i 123",
+ None,
+ false,
+ "nanosecond",
+ )
+ .await;
// Test that we can query the output with a pretty output
let res = query(&server, "foo", "select * from cpu", "pretty", None).await;
@@ -371,15 +380,363 @@ mod tests {
shutdown.cancel();
}
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+ async fn write_lp_tests() {
+ let addr = get_free_port();
+ let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
+ let metrics = Arc::new(metric::Registry::new());
+ let common_state = crate::CommonServerState::new(
+ Arc::clone(&metrics),
+ None,
+ trace_header_parser,
+ addr,
+ None,
+ )
+ .unwrap();
+ let catalog = Arc::new(influxdb3_write::catalog::Catalog::new());
+ let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
+ let parquet_store =
+ ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3"));
+ let num_threads = NonZeroUsize::new(2).unwrap();
+ let exec = Arc::new(Executor::new_with_config(ExecutorConfig {
+ num_threads,
+ target_query_partitions: NonZeroUsize::new(1).unwrap(),
+ object_stores: [&parquet_store]
+ .into_iter()
+ .map(|store| (store.id(), Arc::clone(store.object_store())))
+ .collect(),
+ metric_registry: Arc::clone(&metrics),
+ mem_pool_size: usize::MAX,
+ }));
+
+ let write_buffer = Arc::new(
+ influxdb3_write::write_buffer::WriteBufferImpl::new(
+ Arc::clone(&catalog),
+ None::<Arc<influxdb3_write::wal::WalImpl>>,
+ SegmentId::new(0),
+ )
+ .unwrap(),
+ );
+ let query_executor = crate::query_executor::QueryExecutorImpl::new(
+ catalog,
+ Arc::clone(&write_buffer),
+ Arc::clone(&exec),
+ Arc::clone(&metrics),
+ Arc::new(HashMap::new()),
+ 10,
+ );
+ let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
+
+ let server = crate::Server::new(
+ common_state,
+ persister,
+ Arc::clone(&write_buffer),
+ Arc::new(query_executor),
+ usize::MAX,
+ );
+ let frontend_shutdown = CancellationToken::new();
+ let shutdown = frontend_shutdown.clone();
+
+ tokio::spawn(async move { serve(server, frontend_shutdown).await });
+
+ // Test that only one error comes back
+ let server = format!("http://{}", addr);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=a val= 123\ncpu,host=b val=5 124\ncpu,host=b val= 124",
+ None,
+ false,
+ "nanosecond",
+ )
+ .await;
+
+ let status = resp.status();
+ let body =
+ String::from_utf8(body::to_bytes(resp.into_body()).await.unwrap().to_vec()).unwrap();
+
+ assert_eq!(status, StatusCode::BAD_REQUEST);
+ assert_eq!(
+ body,
+ "{\
+ \"error\":\"parsing failed for write_lp endpoint\",\
+ \"data\":{\
+ \"original_line\":\"cpu,host=a val= 123\",\
+ \"line_number\":1,\
+ \"error_message\":\"No fields were provided\"\
+ }\
+ }"
+ );
+
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=2 155\ncpu,host=a val= 123\ncpu,host=b val=5 199",
+ None,
+ true,
+ "nanosecond",
+ )
+ .await;
+
+ let status = resp.status();
+ let body =
+ String::from_utf8(body::to_bytes(resp.into_body()).await.unwrap().to_vec()).unwrap();
+
+ assert_eq!(status, StatusCode::BAD_REQUEST);
+ assert_eq!(
+ body,
+ "{\
+ \"error\":\"partial write of line protocol ocurred\",\
+ \"data\":[{\
+ \"original_line\":\"cpu,host=a val= 123\",\
+ \"line_number\":2,\
+ \"error_message\":\"No fields were provided\"\
+ }]\
+ }"
+ );
+
+ // Check that the first write did not partially write any data. We
+ // should only see 2 values from the above write.
+ let res = query(&server, "foo", "select * from cpu", "csv", None).await;
+ let body = body::to_bytes(res.into_body()).await.unwrap();
+ let actual = std::str::from_utf8(body.as_bytes()).unwrap();
+ let expected = "host,time,val\n\
+ b,1970-01-01T00:00:00.000000155,2.0\n\
+ b,1970-01-01T00:00:00.000000199,5.0\n";
+ assert_eq!(actual, expected);
+
+ // Check that invalid database names are rejected
+ let resp = write_lp(
+ &server,
+ "this/_is_fine",
+ "cpu,host=b val=2 155\n",
+ None,
+ true,
+ "nanosecond",
+ )
+ .await;
+
+ let status = resp.status();
+ let body =
+ String::from_utf8(body::to_bytes(resp.into_body()).await.unwrap().to_vec()).unwrap();
+
+ assert_eq!(status, StatusCode::BAD_REQUEST);
+ assert_eq!(
+ body,
+ "{\
+ \"error\":\"db name must use ASCII letters, numbers, underscores and hyphens only\",\
+ \"data\":null\
+ }"
+ );
+
+ let resp = write_lp(
+ &server,
+ "?this_is_fine",
+ "cpu,host=b val=2 155\n",
+ None,
+ true,
+ "nanosecond",
+ )
+ .await;
+
+ let status = resp.status();
+ let body =
+ String::from_utf8(body::to_bytes(resp.into_body()).await.unwrap().to_vec()).unwrap();
+
+ assert_eq!(status, StatusCode::BAD_REQUEST);
+ assert_eq!(
+ body,
+ "{\
+ \"error\":\"db name did not start with a number or letter\",\
+ \"data\":null\
+ }"
+ );
+
+ shutdown.cancel();
+ }
+
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+ async fn write_lp_precision_tests() {
+ let addr = get_free_port();
+ let trace_header_parser = trace_http::ctx::TraceHeaderParser::new();
+ let metrics = Arc::new(metric::Registry::new());
+ let common_state = crate::CommonServerState::new(
+ Arc::clone(&metrics),
+ None,
+ trace_header_parser,
+ addr,
+ None,
+ )
+ .unwrap();
+ let catalog = Arc::new(influxdb3_write::catalog::Catalog::new());
+ let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new());
+ let parquet_store =
+ ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3"));
+ let num_threads = NonZeroUsize::new(2).unwrap();
+ let exec = Arc::new(Executor::new_with_config(ExecutorConfig {
+ num_threads,
+ target_query_partitions: NonZeroUsize::new(1).unwrap(),
+ object_stores: [&parquet_store]
+ .into_iter()
+ .map(|store| (store.id(), Arc::clone(store.object_store())))
+ .collect(),
+ metric_registry: Arc::clone(&metrics),
+ mem_pool_size: usize::MAX,
+ }));
+
+ let write_buffer = Arc::new(
+ influxdb3_write::write_buffer::WriteBufferImpl::new(
+ Arc::clone(&catalog),
+ None::<Arc<influxdb3_write::wal::WalImpl>>,
+ SegmentId::new(0),
+ )
+ .unwrap(),
+ );
+ let query_executor = crate::query_executor::QueryExecutorImpl::new(
+ catalog,
+ Arc::clone(&write_buffer),
+ Arc::clone(&exec),
+ Arc::clone(&metrics),
+ Arc::new(HashMap::new()),
+ 10,
+ );
+ let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
+
+ let server = crate::Server::new(
+ common_state,
+ persister,
+ Arc::clone(&write_buffer),
+ Arc::new(query_executor),
+ usize::MAX,
+ );
+ let frontend_shutdown = CancellationToken::new();
+ let shutdown = frontend_shutdown.clone();
+
+ tokio::spawn(async move { serve(server, frontend_shutdown).await });
+
+ let server = format!("http://{}", addr);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=5 1708473600",
+ None,
+ false,
+ "auto",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=5 1708473601000",
+ None,
+ false,
+ "auto",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=5 1708473602000000",
+ None,
+ false,
+ "auto",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=5 1708473603000000000",
+ None,
+ false,
+ "auto",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=6 1708473604",
+ None,
+ false,
+ "second",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=6 1708473605000",
+ None,
+ false,
+ "millisecond",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=6 1708473606000000",
+ None,
+ false,
+ "microsecond",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+ let resp = write_lp(
+ &server,
+ "foo",
+ "cpu,host=b val=6 1708473607000000000",
+ None,
+ false,
+ "nanosecond",
+ )
+ .await;
+ assert_eq!(resp.status(), StatusCode::OK);
+
+ let res = query(&server, "foo", "select * from cpu", "csv", None).await;
+ let body = body::to_bytes(res.into_body()).await.unwrap();
+ // Since a query can come back with data in any order we need to sort it
+ // here before we do any assertions
+ let mut unsorted = String::from_utf8(body.as_bytes().to_vec())
+ .unwrap()
+ .lines()
+ .skip(1)
+ .map(|s| s.to_string())
+ .collect::<Vec<String>>();
+ unsorted.sort();
+ let actual = unsorted.join("\n");
+ let expected = "b,2024-02-21T00:00:00,5.0\n\
+ b,2024-02-21T00:00:01,5.0\n\
+ b,2024-02-21T00:00:02,5.0\n\
+ b,2024-02-21T00:00:03,5.0\n\
+ b,2024-02-21T00:00:04,6.0\n\
+ b,2024-02-21T00:00:05,6.0\n\
+ b,2024-02-21T00:00:06,6.0\n\
+ b,2024-02-21T00:00:07,6.0";
+ assert_eq!(actual, expected);
+
+ shutdown.cancel();
+ }
+
pub(crate) async fn write_lp(
server: impl Into<String> + Send,
database: impl Into<String> + Send,
lp: impl Into<String> + Send,
authorization: Option<&str>,
+ accept_partial: bool,
+ precision: impl Into<String> + Send,
) -> Response<Body> {
let server = server.into();
let client = Client::new();
- let url = format!("{}/api/v3/write_lp?db={}", server, database.into());
+ let url = format!(
+ "{}/api/v3/write_lp?db={}&accept_partial={accept_partial}&precision={}",
+ server,
+ database.into(),
+ precision.into(),
+ );
println!("{}", url);
let mut builder = Request::builder().uri(url).method("POST");
diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs
index 34581a1750..596e2daa86 100644
--- a/influxdb3_write/src/lib.rs
+++ b/influxdb3_write/src/lib.rs
@@ -83,6 +83,8 @@ pub trait Bufferer: Debug + Send + Sync + 'static {
database: NamespaceName<'static>,
lp: &str,
default_time: i64,
+ accept_partial: bool,
+ precision: Precision,
) -> write_buffer::Result<BufferedWriteRequest>;
/// Closes the open segment and returns it so that it can be persisted or thrown away. A new segment will be opened
@@ -342,3 +344,51 @@ pub struct ParquetFile {
pub min_time: i64,
pub max_time: i64,
}
+
+/// The summary data for a persisted parquet file in a segment.
+#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum Precision {
+ Auto,
+ Second,
+ Millisecond,
+ Microsecond,
+ Nanosecond,
+}
+
+impl Default for Precision {
+ fn default() -> Self {
+ Self::Auto
+ }
+}
+
+/// Guess precision based off of a given timestamp.
+// Note that this will fail in June 2128, but that's not our problem
+pub(crate) fn guess_precision(timestamp: i64) -> Precision {
+ const NANO_SECS_PER_SEC: i64 = 1_000_000_000;
+ // Get the absolute value of the timestamp so we can work with negative
+ // numbers
+ let val = timestamp.abs() / NANO_SECS_PER_SEC;
+
+ if val < 5 {
+ // If the time sent to us is in seconds then this will be a number less than
+ // 5 so for example if the time in seconds is 1_708_976_567 then it will be
+ // 1 (due to integer truncation) and be less than 5
+ Precision::Second
+ } else if val < 5_000 {
+ // If however the value is milliseconds and not seconds than the same number
+ // for time but now in milliseconds 1_708_976_567_000 when divided will now
+ // be 1708 which is bigger than the previous if statement but less than this
+ // one and so we return milliseconds
+ Precision::Millisecond
+ } else if val < 5_000_000 {
+ // If we do the same thing here by going up another order of magnitude then
+ // 1_708_976_567_000_000 when divided will be 1708976 which is large enough
+ // for this if statement
+ Precision::Microsecond
+ } else {
+ // Anything else we can assume is large enough of a number that it must
+ // be nanoseconds
+ Precision::Nanosecond
+ }
+}
diff --git a/influxdb3_write/src/write_buffer/buffer_segment.rs b/influxdb3_write/src/write_buffer/buffer_segment.rs
index 8b107b50b8..e7ca94d4be 100644
--- a/influxdb3_write/src/write_buffer/buffer_segment.rs
+++ b/influxdb3_write/src/write_buffer/buffer_segment.rs
@@ -455,6 +455,7 @@ mod tests {
use crate::wal::WalSegmentWriterNoopImpl;
use crate::write_buffer::tests::lp_to_table_batches;
use crate::write_buffer::{parse_validate_and_update_schema, Partitioner};
+ use crate::Precision;
use crate::{LpWriteOp, PersistedCatalog};
use bytes::Bytes;
use datafusion::execution::SendableRecordBatchStream;
@@ -644,7 +645,15 @@ mod tests {
let mut write_batch = WriteBatch::default();
let (seq, db) = catalog.db_or_create(db_name);
let partitioner = Partitioner::new_per_day_partitioner();
- let result = parse_validate_and_update_schema(lp, &db, &partitioner, 0).unwrap();
+ let result = parse_validate_and_update_schema(
+ lp,
+ &db,
+ &partitioner,
+ 0,
+ false,
+ Precision::Nanosecond,
+ )
+ .unwrap();
if let Some(db) = result.schema {
catalog.replace_database(seq, Arc::new(db)).unwrap();
}
diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs
index c1fd83785c..c62bd21c24 100644
--- a/influxdb3_write/src/write_buffer/mod.rs
+++ b/influxdb3_write/src/write_buffer/mod.rs
@@ -8,8 +8,8 @@ use crate::wal::WalSegmentWriterNoopImpl;
use crate::write_buffer::buffer_segment::{ClosedBufferSegment, OpenBufferSegment, TableBuffer};
use crate::write_buffer::flusher::WriteBufferFlusher;
use crate::{
- BufferSegment, BufferedWriteRequest, Bufferer, ChunkContainer, LpWriteOp, SegmentId, Wal,
- WalOp, WriteBuffer,
+ BufferSegment, BufferedWriteRequest, Bufferer, ChunkContainer, LpWriteOp, Precision, SegmentId,
+ Wal, WalOp, WriteBuffer, WriteLineError,
};
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
@@ -36,8 +36,8 @@ use thiserror::Error;
#[derive(Debug, Error)]
pub enum Error {
- #[error("error parsing line {line_number}: {message}")]
- ParseError { line_number: usize, message: String },
+ #[error("parsing for line protocol failed")]
+ ParseError(WriteLineError),
#[error("column type mismatch for column {name}: existing: {existing:?}, new: {new:?}")]
ColumnTypeMismatch {
@@ -121,14 +121,22 @@ impl<W: Wal> WriteBufferImpl<W> {
db_name: NamespaceName<'static>,
lp: &str,
default_time: i64,
+ accept_partial: bool,
+ precision: Precision,
) -> Result<BufferedWriteRequest> {
debug!("write_lp to {} in writebuffer", db_name);
- let result = self.parse_validate_and_update_schema(db_name.clone(), lp, default_time)?;
+ let result = self.parse_validate_and_update_schema(
+ db_name.clone(),
+ lp,
+ default_time,
+ accept_partial,
+ precision,
+ )?;
let wal_op = WalOp::LpWrite(LpWriteOp {
db_name: db_name.to_string(),
- lp: lp.to_string(),
+ lp: result.lp_valid,
default_time,
});
@@ -139,7 +147,7 @@ impl<W: Wal> WriteBufferImpl<W> {
Ok(BufferedWriteRequest {
db_name,
- invalid_lines: vec![],
+ invalid_lines: result.errors,
line_count: result.line_count,
field_count: result.field_count,
tag_count: result.tag_count,
@@ -154,6 +162,8 @@ impl<W: Wal> WriteBufferImpl<W> {
db_name: NamespaceName<'static>,
lp: &str,
default_time: i64,
+ accept_partial: bool,
+ precision: Precision,
) -> Result<ValidationResult> {
let (sequence, db) = self.catalog.db_or_create(db_name.as_str());
let mut result = parse_validate_and_update_schema(
@@ -161,6 +171,8 @@ impl<W: Wal> WriteBufferImpl<W> {
&db,
&Partitioner::new_per_day_partitioner(),
default_time,
+ accept_partial,
+ precision,
)?;
if let Some(schema) = result.schema.take() {
@@ -253,8 +265,11 @@ impl<W: Wal> Bufferer for WriteBufferImpl<W> {
database: NamespaceName<'static>,
lp: &str,
default_time: i64,
+ accept_partial: bool,
+ precision: Precision,
) -> Result<BufferedWriteRequest> {
- self.write_lp(database, lp, default_time).await
+ self.write_lp(database, lp, default_time, accept_partial, precision)
+ .await
}
async fn close_open_segment(&self) -> crate::Result<Arc<dyn BufferSegment>> {
@@ -358,17 +373,50 @@ pub(crate) fn parse_validate_and_update_schema(
schema: &DatabaseSchema,
partitioner: &Partitioner,
default_time: i64,
+ accept_partial: bool,
+ precision: Precision,
) -> Result<ValidationResult> {
let mut lines = vec![];
+ let mut errors = vec![];
+ let mut valid_lines = vec![];
+ let mut lp_lines = lp.lines();
+
for (line_idx, maybe_line) in parse_lines(lp).enumerate() {
- let line = maybe_line.map_err(|e| Error::ParseError {
- line_number: line_idx + 1,
- message: e.to_string(),
- })?;
+ let line = match maybe_line {
+ Ok(line) => line,
+ Err(e) => {
+ if !accept_partial {
+ return Err(Error::ParseError(WriteLineError {
+ // This unwrap is fine because we're moving line by line
+ // alongside the output from parse_lines
+ original_line: lp_lines.next().unwrap().to_string(),
+ line_number: line_idx + 1,
+ error_message: e.to_string(),
+ }));
+ } else {
+ errors.push(WriteLineError {
+ original_line: lp_lines.next().unwrap().to_string(),
+ // This unwrap is fine because we're moving line by line
+ // alongside the output from parse_lines
+ line_number: line_idx + 1,
+ error_message: e.to_string(),
+ });
+ }
+ continue;
+ }
+ };
+ // This unwrap is fine because we're moving line by line
+ // alongside the output from parse_lines
+ valid_lines.push(lp_lines.next().unwrap());
lines.push(line);
}
- validate_or_insert_schema_and_partitions(lines, schema, partitioner, default_time)
+ validate_or_insert_schema_and_partitions(lines, schema, partitioner, default_time, precision)
+ .map(move |mut result| {
+ result.lp_valid = valid_lines.join("\n");
+ result.errors = errors;
+ result
+ })
}
/// Takes parsed lines, validates their schema. If new tables or columns are defined, they
@@ -380,6 +428,7 @@ pub(crate) fn validate_or_insert_schema_and_partitions(
schema: &DatabaseSchema,
partitioner: &Partitioner,
default_time: i64,
+ precision: Precision,
) -> Result<ValidationResult> {
// The (potentially updated) DatabaseSchema to return to the caller.
let mut schema = Cow::Borrowed(schema);
@@ -401,6 +450,7 @@ pub(crate) fn validate_or_insert_schema_and_partitions(
&mut schema,
partitioner,
default_time,
+ precision,
)?;
}
@@ -415,6 +465,8 @@ pub(crate) fn validate_or_insert_schema_and_partitions(
line_count,
field_count,
tag_count,
+ errors: vec![],
+ lp_valid: String::new(),
})
}
@@ -426,6 +478,7 @@ fn validate_and_convert_parsed_line(
schema: &mut Cow<'_, DatabaseSchema>,
partitioner: &Partitioner,
default_time: i64,
+ precision: Precision,
) -> Result<()> {
let table_name = line.series.measurement.as_str();
@@ -512,7 +565,27 @@ fn validate_and_convert_parsed_line(
}
// set the time value
- let time_value = line.timestamp.unwrap_or(default_time);
+ let time_value = line
+ .timestamp
+ .map(|ts| {
+ let multiplier = match precision {
+ Precision::Auto => match crate::guess_precision(ts) {
+ Precision::Second => 1_000_000_000,
+ Precision::Millisecond => 1_000_000,
+ Precision::Microsecond => 1_000,
+ Precision::Nanosecond => 1,
+
+ Precision::Auto => unreachable!(),
+ },
+ Precision::Second => 1_000_000_000,
+ Precision::Millisecond => 1_000_000,
+ Precision::Microsecond => 1_000,
+ Precision::Nanosecond => 1,
+ };
+
+ ts * multiplier
+ })
+ .unwrap_or(default_time);
values.push(Field {
name: TIME_COLUMN_NAME.to_string(),
value: FieldData::Timestamp(time_value),
@@ -585,6 +658,10 @@ pub(crate) struct ValidationResult {
pub(crate) field_count: usize,
/// Number of tags passed in
pub(crate) tag_count: usize,
+ /// Any errors that occurred while parsing the lines
+ pub(crate) errors: Vec<crate::WriteLineError>,
+ /// Only valid lines from what was passed in to validate
+ pub(crate) lp_valid: String,
}
/// Generates the partition key for a given line or row
@@ -628,7 +705,15 @@ mod tests {
let db = Arc::new(DatabaseSchema::new("foo"));
let partitioner = Partitioner::new_per_day_partitioner();
let lp = "cpu,region=west user=23.2 100\nfoo f1=1i";
- let result = parse_validate_and_update_schema(lp, &db, &partitioner, 0).unwrap();
+ let result = parse_validate_and_update_schema(
+ lp,
+ &db,
+ &partitioner,
+ 0,
+ false,
+ Precision::Nanosecond,
+ )
+ .unwrap();
println!("result: {:#?}", result);
let db = result.schema.unwrap();
@@ -647,7 +732,13 @@ mod tests {
WriteBufferImpl::new(catalog, Some(Arc::new(wal)), SegmentId::new(0)).unwrap();
let summary = write_buffer
- .write_lp(NamespaceName::new("foo").unwrap(), "cpu bar=1 10", 123)
+ .write_lp(
+ NamespaceName::new("foo").unwrap(),
+ "cpu bar=1 10",
+ 123,
+ false,
+ Precision::Nanosecond,
+ )
.await
.unwrap();
assert_eq!(summary.line_count, 1);
@@ -686,7 +777,15 @@ mod tests {
pub(crate) fn lp_to_table_batches(lp: &str, default_time: i64) -> HashMap<String, TableBatch> {
let db = Arc::new(DatabaseSchema::new("foo"));
let partitioner = Partitioner::new_per_day_partitioner();
- let result = parse_validate_and_update_schema(lp, &db, &partitioner, default_time).unwrap();
+ let result = parse_validate_and_update_schema(
+ lp,
+ &db,
+ &partitioner,
+ default_time,
+ false,
+ Precision::Nanosecond,
+ )
+ .unwrap();
result.table_batches
}
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 734a5aa4b1..fb589cecc7 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -83,7 +83,7 @@ sqlx-postgres = { version = "0.7", default-features = false, features = ["any",
sqlx-sqlite = { version = "0.7", default-features = false, features = ["any", "json", "migrate", "offline", "uuid"] }
strum = { version = "0.25", features = ["derive"] }
thrift = { version = "0.17" }
-tokio = { version = "1", features = ["full", "tracing"] }
+tokio = { version = "1", features = ["fs", "io-std", "io-util", "macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time", "tracing"] }
tokio-stream = { version = "0.1", features = ["fs", "net"] }
tokio-util = { version = "0.7", features = ["codec", "compat", "io", "time"] }
tower = { version = "0.4", features = ["balance", "buffer", "filter", "limit", "timeout", "util"] }
@@ -151,7 +151,7 @@ sqlx-postgres = { version = "0.7", default-features = false, features = ["any",
sqlx-sqlite = { version = "0.7", default-features = false, features = ["any", "json", "migrate", "offline", "uuid"] }
syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "full", "visit", "visit-mut"] }
syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "full", "visit-mut"] }
-tokio = { version = "1", features = ["full", "tracing"] }
+tokio = { version = "1", features = ["fs", "io-std", "io-util", "macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time", "tracing"] }
tokio-stream = { version = "0.1", features = ["fs", "net"] }
tracing = { version = "0.1", features = ["log", "max_level_trace", "release_max_level_trace"] }
tracing-core = { version = "0.1" }
@@ -196,12 +196,12 @@ spin = { version = "0.9" }
hyper-rustls = { version = "0.24" }
spin = { version = "0.9" }
winapi = { version = "0.3", default-features = false, features = ["cfg", "consoleapi", "errhandlingapi", "evntrace", "fileapi", "handleapi", "in6addr", "inaddr", "minwinbase", "minwindef", "ntsecapi", "ntstatus", "processenv", "profileapi", "std", "sysinfoapi", "winbase", "wincon", "windef", "winerror", "winioctl", "winnt"] }
-windows-sys-b21d60becc0929df = { package = "windows-sys", version = "0.52", features = ["Win32_Foundation", "Win32_NetworkManagement_IpHelper", "Win32_Networking_WinSock", "Win32_Security_Authentication_Identity", "Win32_Security_Credentials", "Win32_Security_Cryptography", "Win32_Storage_FileSystem", "Win32_System_Com", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_Memory", "Win32_System_Threading", "Win32_UI_Input_KeyboardAndMouse", "Win32_UI_Shell"] }
+windows-sys-b21d60becc0929df = { package = "windows-sys", version = "0.52", features = ["Win32_Foundation", "Win32_NetworkManagement_IpHelper", "Win32_Networking_WinSock", "Win32_Security_Authentication_Identity", "Win32_Security_Credentials", "Win32_Security_Cryptography", "Win32_Storage_FileSystem", "Win32_System_Com", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_IO", "Win32_System_Memory", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI_Input_KeyboardAndMouse", "Win32_UI_Shell"] }
windows-sys-c8eced492e86ede7 = { package = "windows-sys", version = "0.48", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_IO", "Win32_System_Pipes", "Win32_System_Registry", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_Time", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }
[target.x86_64-pc-windows-msvc.build-dependencies]
spin = { version = "0.9" }
-windows-sys-b21d60becc0929df = { package = "windows-sys", version = "0.52", features = ["Win32_Foundation", "Win32_NetworkManagement_IpHelper", "Win32_Networking_WinSock", "Win32_Security_Authentication_Identity", "Win32_Security_Credentials", "Win32_Security_Cryptography", "Win32_Storage_FileSystem", "Win32_System_Com", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_Memory", "Win32_System_Threading", "Win32_UI_Input_KeyboardAndMouse", "Win32_UI_Shell"] }
+windows-sys-b21d60becc0929df = { package = "windows-sys", version = "0.52", features = ["Win32_Foundation", "Win32_NetworkManagement_IpHelper", "Win32_Networking_WinSock", "Win32_Security_Authentication_Identity", "Win32_Security_Credentials", "Win32_Security_Cryptography", "Win32_Storage_FileSystem", "Win32_System_Com", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_IO", "Win32_System_Memory", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI_Input_KeyboardAndMouse", "Win32_UI_Shell"] }
windows-sys-c8eced492e86ede7 = { package = "windows-sys", version = "0.48", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_IO", "Win32_System_Pipes", "Win32_System_Registry", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_Time", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }
### END HAKARI SECTION
|
ed2050f448e00ceac468debd989a92d0164bbbde
|
Trevor Hilton
|
2024-09-13 08:36:59
|
split e2e test harness up for pro (#25322)
|
This makes some changes to the TestServer E2E framework, which is used
for running integration tests in the influxdb3 crate. These changes are
meant so that we can more easily split the code for pro.
| null |
test: split e2e test harness up for pro (#25322)
This makes some changes to the TestServer E2E framework, which is used
for running integration tests in the influxdb3 crate. These changes are
meant so that we can more easily split the code for pro.
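As a rough usage sketch only (assuming the `influxdb3` test crate's `TestServer` and `TestConfig` from the diff below are in scope; the token strings and host ID are placeholders, not real values), a test after this change configures and spawns a server through the new `ConfigProvider` trait roughly like so:
```rust
use crate::{ConfigProvider, TestServer};

#[tokio::test]
async fn example_configured_server() {
    // Builder methods are now named `with_*`, and `spawn()` comes from the
    // `ConfigProvider` trait so pro-specific configs can reuse the same
    // spawning path (`TestServer::spawn_inner`).
    let _server = TestServer::configure()
        .with_auth_token("<hashed token>", "<raw token>")
        .with_host_id("example-host")
        .spawn()
        .await;

    // ...issue HTTP/gRPC requests against the spawned server here...
}
```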
|
diff --git a/influxdb3/tests/server/auth.rs b/influxdb3/tests/server/auth.rs
index 2d8a7516f6..ddcab4b45f 100644
--- a/influxdb3/tests/server/auth.rs
+++ b/influxdb3/tests/server/auth.rs
@@ -3,7 +3,7 @@ use arrow_util::assert_batches_sorted_eq;
use influxdb3_client::Precision;
use reqwest::StatusCode;
-use crate::{collect_stream, TestServer};
+use crate::{collect_stream, ConfigProvider, TestServer};
#[tokio::test]
async fn auth() {
@@ -11,7 +11,7 @@ async fn auth() {
const TOKEN: &str = "apiv3_mp75KQAhbqv0GeQXk8MPuZ3ztaLEaR5JzS8iifk1FwuroSVyXXyrJK1c4gEr1kHkmbgzDV-j3MvQpaIMVJBAiA";
let server = TestServer::configure()
- .auth_token(HASHED_TOKEN, TOKEN)
+ .with_auth_token(HASHED_TOKEN, TOKEN)
.spawn()
.await;
@@ -120,7 +120,7 @@ async fn auth_grpc() {
const TOKEN: &str = "apiv3_mp75KQAhbqv0GeQXk8MPuZ3ztaLEaR5JzS8iifk1FwuroSVyXXyrJK1c4gEr1kHkmbgzDV-j3MvQpaIMVJBAiA";
let server = TestServer::configure()
- .auth_token(HASHED_TOKEN, TOKEN)
+ .with_auth_token(HASHED_TOKEN, TOKEN)
.spawn()
.await;
@@ -216,7 +216,7 @@ async fn v1_password_parameter() {
const TOKEN: &str = "apiv3_mp75KQAhbqv0GeQXk8MPuZ3ztaLEaR5JzS8iifk1FwuroSVyXXyrJK1c4gEr1kHkmbgzDV-j3MvQpaIMVJBAiA";
let server = TestServer::configure()
- .auth_token(HASHED_TOKEN, TOKEN)
+ .with_auth_token(HASHED_TOKEN, TOKEN)
.spawn()
.await;
diff --git a/influxdb3/tests/server/main.rs b/influxdb3/tests/server/main.rs
index 4039e3b114..15943550da 100644
--- a/influxdb3/tests/server/main.rs
+++ b/influxdb3/tests/server/main.rs
@@ -23,15 +23,35 @@ mod query;
mod system_tables;
mod write;
+trait ConfigProvider {
+ /// Convert this to a set of command line arguments for `influxdb3 serve`
+ fn as_args(&self) -> Vec<String>;
+
+ /// Get the auth token from this config if it was set
+ fn auth_token(&self) -> Option<&str>;
+
+ /// Spawn a new [`TestServer`] with this configuration
+ ///
+ /// This will run the `influxdb3 serve` command and bind its HTTP address to a random port
+ /// on localhost.
+ async fn spawn(&self) -> TestServer
+ where
+ Self: Sized,
+ {
+ TestServer::spawn_inner(self).await
+ }
+}
+
/// Configuration for a [`TestServer`]
#[derive(Debug, Default)]
pub struct TestConfig {
auth_token: Option<(String, String)>,
+ host_id: Option<String>,
}
impl TestConfig {
/// Set the auth token for this [`TestServer`]
- pub fn auth_token<S: Into<String>, R: Into<String>>(
+ pub fn with_auth_token<S: Into<String>, R: Into<String>>(
mut self,
hashed_token: S,
raw_token: R,
@@ -40,21 +60,35 @@ impl TestConfig {
self
}
- /// Spawn a new [`TestServer`] with this configuration
- ///
- /// This will run the `influxdb3 serve` command, and bind its HTTP
- /// address to a random port on localhost.
- pub async fn spawn(self) -> TestServer {
- TestServer::spawn_inner(self).await
+ /// Set a host identifier prefix on the spawned [`TestServer`]
+ pub fn with_host_id<S: Into<String>>(mut self, host_id: S) -> Self {
+ self.host_id = Some(host_id.into());
+ self
}
+}
- fn as_args(&self) -> Vec<&str> {
+impl ConfigProvider for TestConfig {
+ fn as_args(&self) -> Vec<String> {
let mut args = vec![];
if let Some((token, _)) = &self.auth_token {
- args.append(&mut vec!["--bearer-token", token]);
+ args.append(&mut vec!["--bearer-token".to_string(), token.to_owned()]);
+ }
+ args.push("--host-id".to_string());
+ if let Some(host) = &self.host_id {
+ args.push(host.to_owned());
+ } else {
+ args.push("test-server".to_string());
}
+ args.append(&mut vec![
+ "--object-store".to_string(),
+ "memory".to_string(),
+ ]);
args
}
+
+ fn auth_token(&self) -> Option<&str> {
+ self.auth_token.as_ref().map(|(_, t)| t.as_str())
+ }
}
/// A running instance of the `influxdb3 serve` process
@@ -65,7 +99,7 @@ impl TestConfig {
/// TEST_LOG= cargo test
/// ```
pub struct TestServer {
- config: TestConfig,
+ auth_token: Option<String>,
bind_addr: SocketAddr,
server_process: Child,
http_client: reqwest::Client,
@@ -77,7 +111,7 @@ impl TestServer {
/// This will run the `influxdb3 serve` command, and bind its HTTP
/// address to a random port on localhost.
pub async fn spawn() -> Self {
- Self::spawn_inner(Default::default()).await
+ Self::spawn_inner(&TestConfig::default()).await
}
/// Configure a [`TestServer`] before spawning
@@ -85,15 +119,13 @@ impl TestServer {
TestConfig::default()
}
- async fn spawn_inner(config: TestConfig) -> Self {
+ async fn spawn_inner(config: &impl ConfigProvider) -> Self {
let bind_addr = get_local_bind_addr();
let mut command = Command::cargo_bin("influxdb3").expect("create the influxdb3 command");
let mut command = command
.arg("serve")
.args(["--http-bind", &bind_addr.to_string()])
- .args(["--object-store", "memory"])
.args(["--wal-flush-interval", "10ms"])
- .args(["--host-id", "test-server"])
.args(config.as_args());
// If TEST_LOG env var is not defined, discard stdout/stderr
@@ -104,7 +136,7 @@ impl TestServer {
let server_process = command.spawn().expect("spawn the influxdb3 server process");
let server = Self {
- config,
+ auth_token: config.auth_token().map(|s| s.to_owned()),
bind_addr,
server_process,
http_client: reqwest::Client::new(),
@@ -179,7 +211,7 @@ impl TestServer {
precision: Precision,
) -> Result<(), influxdb3_client::Error> {
let mut client = influxdb3_client::Client::new(self.client_addr()).unwrap();
- if let Some((_, token)) = &self.config.auth_token {
+ if let Some(token) = &self.auth_token {
client = client.with_auth_token(token);
}
client
|
e07a48e3508d7ab9d49b52d86af923a5d8a7eca0
|
Carol (Nichols || Goulding)
|
2023-07-21 14:07:27
|
Only send the hash ID from ingester to querier if in catalog
|
The catalog ID shouldn't be used anywhere, as the two fields get turned
into a TransitionPartitionId on the querier side.
This will enable us to not query the catalog if we're sure this
partition has a hash ID in the catalog.
| null |
feat: Only send the hash ID from ingester to querier if in catalog
The catalog ID shouldn't be used anywhere, as the two fields get turned
into a TransitionPartitionId on the querier side.
This will enable us to not query the catalog if we're sure this
partition has a hash ID in the catalog.
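A minimal sketch of the identifier hand-off this change sets up, using simplified stand-in types rather than the real `data_types`/proto definitions (only the enum variants, field names, and the one-of-two-fields rule are taken from the `encode_partition` hunk in the diff below):
```rust
/// Simplified stand-in for `data_types::TransitionPartitionId`.
enum TransitionPartitionId {
    /// Hash-based identifier; present when the catalog stores a hash ID.
    Deterministic(Vec<u8>),
    /// Old catalog row ID, kept only for partitions without a hash ID.
    Deprecated(i64),
}

/// Simplified stand-in for `proto::IngesterQueryResponseMetadata`.
struct ResponseMetadata {
    partition_id: Option<i64>,
    partition_hash_id: Option<Vec<u8>>,
}

/// Populate exactly one of the two fields, so the querier only receives a
/// catalog ID when no hash ID exists and can otherwise skip the catalog lookup.
fn encode_partition_id(id: TransitionPartitionId) -> ResponseMetadata {
    match id {
        TransitionPartitionId::Deterministic(hash) => ResponseMetadata {
            partition_id: None,
            partition_hash_id: Some(hash),
        },
        TransitionPartitionId::Deprecated(row_id) => ResponseMetadata {
            partition_id: Some(row_id),
            partition_hash_id: None,
        },
    }
}
```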
|
diff --git a/ingester/src/buffer_tree/partition.rs b/ingester/src/buffer_tree/partition.rs
index bfd8065cdb..817d639059 100644
--- a/ingester/src/buffer_tree/partition.rs
+++ b/ingester/src/buffer_tree/partition.rs
@@ -449,7 +449,7 @@ mod tests {
use super::*;
use crate::{
buffer_tree::partition::resolver::SortKeyResolver,
- test_util::{populate_catalog, PartitionDataBuilder, ARBITRARY_CATALOG_PARTITION_ID},
+ test_util::{populate_catalog, PartitionDataBuilder, ARBITRARY_TRANSITION_PARTITION_ID},
};
// Write some data and read it back from the buffer.
@@ -473,7 +473,7 @@ mod tests {
let data = p
.get_query_data(&OwnedProjection::default())
.expect("should return data");
- assert_eq!(data.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(data.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
let expected = [
"+--------+--------+----------+--------------------------------+",
@@ -496,7 +496,7 @@ mod tests {
let data = p
.get_query_data(&OwnedProjection::default())
.expect("should contain data");
- assert_eq!(data.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(data.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
let expected = [
"+--------+--------+----------+--------------------------------+",
@@ -532,7 +532,7 @@ mod tests {
// And validate the data being persisted.
assert_eq!(
persisting_data.partition_id(),
- ARBITRARY_CATALOG_PARTITION_ID
+ &*ARBITRARY_TRANSITION_PARTITION_ID
);
assert_eq!(persisting_data.record_batches().len(), 1);
let expected = [
@@ -561,7 +561,7 @@ mod tests {
let data = p
.get_query_data(&OwnedProjection::default())
.expect("must have data");
- assert_eq!(data.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(data.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
assert_eq!(data.record_batches().len(), 2);
let expected = [
"+--------+--------+----------+--------------------------------+",
@@ -589,7 +589,7 @@ mod tests {
let data = p
.get_query_data(&OwnedProjection::default())
.expect("must have data");
- assert_eq!(data.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(data.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
assert_eq!(data.record_batches().len(), 1);
let expected = [
"+--------+--------+---------+--------------------------------+",
diff --git a/ingester/src/buffer_tree/root.rs b/ingester/src/buffer_tree/root.rs
index 7b461e04b9..96a2782325 100644
--- a/ingester/src/buffer_tree/root.rs
+++ b/ingester/src/buffer_tree/root.rs
@@ -238,7 +238,7 @@ mod tests {
use assert_matches::assert_matches;
use data_types::{
partition_template::{test_table_partition_override, TemplatePart},
- PartitionHashId, PartitionId, PartitionKey,
+ PartitionId, PartitionKey, TransitionPartitionId,
};
use datafusion::{
assert_batches_eq, assert_batches_sorted_eq,
@@ -265,7 +265,7 @@ mod tests {
defer_namespace_name_1_ms, make_write_op, PartitionDataBuilder,
ARBITRARY_CATALOG_PARTITION_ID, ARBITRARY_NAMESPACE_ID, ARBITRARY_NAMESPACE_NAME,
ARBITRARY_PARTITION_KEY, ARBITRARY_TABLE_ID, ARBITRARY_TABLE_NAME,
- ARBITRARY_TABLE_PROVIDER,
+ ARBITRARY_TABLE_PROVIDER, ARBITRARY_TRANSITION_PARTITION_ID,
},
};
@@ -1307,10 +1307,7 @@ mod tests {
let partition = partitions.pop().unwrap();
// Ensure the partition hash ID is sent.
- assert_eq!(
- partition.partition_hash_id().unwrap(),
- &PartitionHashId::new(ARBITRARY_TABLE_ID, &ARBITRARY_PARTITION_KEY)
- );
+ assert_eq!(partition.id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
// Perform the partition read
let batches = partition.into_record_batches();
@@ -1383,6 +1380,9 @@ mod tests {
let partition = partitions.pop().unwrap();
// Ensure the partition hash ID is NOT sent.
- assert!(partition.partition_hash_id().is_none());
+ assert_eq!(
+ partition.id(),
+ &TransitionPartitionId::Deprecated(ARBITRARY_CATALOG_PARTITION_ID),
+ );
}
}
diff --git a/ingester/src/buffer_tree/table.rs b/ingester/src/buffer_tree/table.rs
index 2307b1a423..f20403fdcf 100644
--- a/ingester/src/buffer_tree/table.rs
+++ b/ingester/src/buffer_tree/table.rs
@@ -273,11 +273,10 @@ where
let partitions = self.partitions().into_iter().filter_map(move |p| {
let mut span = span.child("partition read");
- let (id, hash_id, completed_persistence_count, data, partition_key) = {
+ let (id, completed_persistence_count, data, partition_key) = {
let mut p = p.lock();
(
- p.partition_id(),
- p.partition_hash_id().cloned(),
+ p.transition_partition_id(),
p.completed_persistence_count(),
p.get_query_data(&projection),
p.partition_key().clone(),
@@ -286,7 +285,7 @@ where
let ret = match data {
Some(data) => {
- assert_eq!(id, data.partition_id());
+ assert_eq!(&id, data.partition_id());
// Potentially prune out this partition if the partition
// template & derived partition key can be used to match
@@ -324,11 +323,10 @@ where
PartitionResponse::new(
data.into_record_batches(),
id,
- hash_id,
completed_persistence_count,
)
}
- None => PartitionResponse::new(vec![], id, hash_id, completed_persistence_count),
+ None => PartitionResponse::new(vec![], id, completed_persistence_count),
};
span.ok("read partition data");
diff --git a/ingester/src/persist/context.rs b/ingester/src/persist/context.rs
index 21fa4d3f16..e9df2f89ae 100644
--- a/ingester/src/persist/context.rs
+++ b/ingester/src/persist/context.rs
@@ -1,9 +1,6 @@
use std::sync::Arc;
-use data_types::{
- NamespaceId, ParquetFileParams, PartitionHashId, PartitionId, PartitionKey, TableId,
- TransitionPartitionId,
-};
+use data_types::{NamespaceId, ParquetFileParams, PartitionKey, TableId, TransitionPartitionId};
use observability_deps::tracing::*;
use parking_lot::Mutex;
use schema::sort::SortKey;
@@ -68,8 +65,8 @@ impl PersistRequest {
)
}
- /// Return the partition ID of the persisting data.
- pub(super) fn partition_id(&self) -> PartitionId {
+ /// Return the partition identifier of the persisting data.
+ pub(super) fn partition_id(&self) -> &TransitionPartitionId {
self.data.partition_id()
}
}
@@ -88,8 +85,7 @@ pub(super) struct Context {
/// IDs loaded from the partition at construction time.
namespace_id: NamespaceId,
table_id: TableId,
- partition_id: PartitionId,
- partition_hash_id: Option<PartitionHashId>,
+ partition_id: TransitionPartitionId,
// The partition key for this partition
partition_key: PartitionKey,
@@ -137,7 +133,7 @@ impl Context {
/// Locks the [`PartitionData`] in `req` to read various properties which
/// are then cached in the [`Context`].
pub(super) fn new(req: PersistRequest) -> Self {
- let partition_id = req.data.partition_id();
+ let partition_id = req.data.partition_id().clone();
// Obtain the partition lock and load the immutable values that will be
// used during this persistence.
@@ -153,7 +149,7 @@ impl Context {
let p = Arc::clone(&partition);
let guard = p.lock();
- assert_eq!(partition_id, guard.partition_id());
+ assert_eq!(partition_id, guard.transition_partition_id());
Self {
partition,
@@ -161,7 +157,6 @@ impl Context {
namespace_id: guard.namespace_id(),
table_id: guard.table_id(),
partition_id,
- partition_hash_id: guard.partition_hash_id().cloned(),
partition_key: guard.partition_key().clone(),
namespace_name: Arc::clone(guard.namespace_name()),
table: Arc::clone(guard.table()),
@@ -292,16 +287,8 @@ impl Context {
self.table_id
}
- pub(super) fn partition_id(&self) -> PartitionId {
- self.partition_id
- }
-
- pub(super) fn partition_hash_id(&self) -> Option<PartitionHashId> {
- self.partition_hash_id.clone()
- }
-
- pub(super) fn transition_partition_id(&self) -> TransitionPartitionId {
- TransitionPartitionId::from((self.partition_id, self.partition_hash_id.as_ref()))
+ pub(super) fn partition_id(&self) -> &TransitionPartitionId {
+ &self.partition_id
}
pub(super) fn partition_key(&self) -> &PartitionKey {
diff --git a/ingester/src/persist/drain_buffer.rs b/ingester/src/persist/drain_buffer.rs
index 5c1c1e0e66..1493d9fc91 100644
--- a/ingester/src/persist/drain_buffer.rs
+++ b/ingester/src/persist/drain_buffer.rs
@@ -32,7 +32,7 @@ where
let data = p.lock().mark_persisting()?;
debug!(
- partition_id=data.partition_id().get(),
+ partition_id=%data.partition_id(),
lock_wait=?Instant::now().duration_since(t),
"read data for persistence"
);
diff --git a/ingester/src/persist/handle.rs b/ingester/src/persist/handle.rs
index ef32ee4c43..79719f2e3a 100644
--- a/ingester/src/persist/handle.rs
+++ b/ingester/src/persist/handle.rs
@@ -312,7 +312,7 @@ impl PersistHandle {
fn assign_worker(&self, r: PersistRequest) {
debug!(
- partition_id = r.partition_id().get(),
+ partition_id = %r.partition_id(),
"enqueue persist job to assigned worker"
);
@@ -357,8 +357,8 @@ impl PersistQueue for PersistHandle {
partition: Arc<Mutex<PartitionData>>,
data: PersistingData,
) -> oneshot::Receiver<()> {
- let partition_id = data.partition_id().get();
- debug!(partition_id, "enqueuing persistence task");
+ let partition_id = data.partition_id().clone();
+ debug!(%partition_id, "enqueuing persistence task");
// Record a starting timestamp, and increment the number of persist jobs
// before waiting on the semaphore - this ensures the difference between
@@ -435,7 +435,7 @@ impl PersistQueue for PersistHandle {
if let Some(new_sort_key) = adjust_sort_key_columns(&v, &data_primary_key).1 {
// This persist operation will require a sort key update.
trace!(
- partition_id,
+ %partition_id,
old_sort_key = %v,
%new_sort_key,
"persist job will require sort key update"
@@ -444,7 +444,7 @@ impl PersistQueue for PersistHandle {
} else {
// This persist operation will not require a sort key
// update.
- debug!(partition_id, "enqueue persist job to global work queue");
+ debug!(%partition_id, "enqueue persist job to global work queue");
self.global_queue.send(r).await.expect("no persist workers");
}
}
@@ -452,7 +452,7 @@ impl PersistQueue for PersistHandle {
// If no sort key is known (either because it was unresolved, or
// not yet set), the task must be serialised w.r.t other persist
// jobs for the same partition.
- trace!(partition_id, "persist job has no known sort key");
+ trace!(%partition_id, "persist job has no known sort key");
self.assign_worker(r);
}
}
@@ -499,9 +499,9 @@ mod tests {
tests::{assert_metric_counter, assert_metric_gauge},
},
test_util::{
- make_write_op, PartitionDataBuilder, ARBITRARY_CATALOG_PARTITION_ID,
- ARBITRARY_NAMESPACE_ID, ARBITRARY_NAMESPACE_NAME, ARBITRARY_PARTITION_KEY,
- ARBITRARY_TABLE_ID, ARBITRARY_TABLE_NAME, ARBITRARY_TABLE_PROVIDER,
+ make_write_op, PartitionDataBuilder, ARBITRARY_NAMESPACE_ID, ARBITRARY_NAMESPACE_NAME,
+ ARBITRARY_PARTITION_KEY, ARBITRARY_TABLE_ID, ARBITRARY_TABLE_NAME,
+ ARBITRARY_TABLE_PROVIDER, ARBITRARY_TRANSITION_PARTITION_ID,
},
};
@@ -591,7 +591,7 @@ mod tests {
.expect("message was not found in either worker")
}
};
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
// Drop the message, and ensure the notification becomes inactive.
drop(msg);
@@ -611,7 +611,7 @@ mod tests {
let msg = assigned_worker
.try_recv()
.expect("message was not found in either worker");
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
}
/// A test that ensures the correct destination of a partition that has no
@@ -677,7 +677,7 @@ mod tests {
.expect("message was not found in either worker")
}
};
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
// Drop the message, and ensure the notification becomes inactive.
drop(msg);
@@ -698,7 +698,7 @@ mod tests {
let msg = assigned_worker
.try_recv()
.expect("message was not found in either worker");
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
}
/// A test that ensures the correct destination of a partition that has an
@@ -765,7 +765,7 @@ mod tests {
.expect("message was not found in either worker")
}
};
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
// Drop the message, and ensure the notification becomes inactive.
drop(msg);
@@ -786,7 +786,7 @@ mod tests {
let msg = assigned_worker
.try_recv()
.expect("message was not found in either worker");
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
}
/// A test that a partition that does not require a sort key update is
@@ -845,7 +845,7 @@ mod tests {
let msg = global_rx
.try_recv()
.expect("task should be in global queue");
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
// Drop the message, and ensure the notification becomes inactive.
drop(msg);
@@ -866,7 +866,7 @@ mod tests {
let msg = global_rx
.try_recv()
.expect("task should be in global queue");
- assert_eq!(msg.partition_id(), ARBITRARY_CATALOG_PARTITION_ID);
+ assert_eq!(msg.partition_id(), &*ARBITRARY_TRANSITION_PARTITION_ID);
}
/// A test that a ensures tasks waiting to be enqueued (waiting on the
diff --git a/ingester/src/persist/worker.rs b/ingester/src/persist/worker.rs
index f640528a33..e5f61b4d95 100644
--- a/ingester/src/persist/worker.rs
+++ b/ingester/src/persist/worker.rs
@@ -278,12 +278,7 @@ where
let pool = worker_state.exec.pool();
let (md, file_size) = worker_state
.store
- .upload(
- record_stream,
- &ctx.transition_partition_id(),
- &iox_metadata,
- pool,
- )
+ .upload(record_stream, &ctx.partition_id(), &iox_metadata, pool)
.await
.expect("unexpected fatal persist error");
@@ -376,11 +371,7 @@ where
let mut repos = catalog.repositories().await;
match repos
.partitions()
- .cas_sort_key(
- &ctx.transition_partition_id(),
- old_sort_key.clone(),
- &new_sort_key_str,
- )
+ .cas_sort_key(&ctx.partition_id(), old_sort_key.clone(), &new_sort_key_str)
.await
{
Ok(_) => ControlFlow::Break(Ok(())),
diff --git a/ingester/src/query/partition_response.rs b/ingester/src/query/partition_response.rs
index 910d83c9c9..9b8d8eab07 100644
--- a/ingester/src/query/partition_response.rs
+++ b/ingester/src/query/partition_response.rs
@@ -3,7 +3,7 @@
//! [`QueryResponse`]: super::response::QueryResponse
use arrow::record_batch::RecordBatch;
-use data_types::{PartitionHashId, PartitionId};
+use data_types::TransitionPartitionId;
/// Response data for a single partition.
#[derive(Debug)]
@@ -12,10 +12,7 @@ pub(crate) struct PartitionResponse {
batches: Vec<RecordBatch>,
/// Partition ID.
- id: PartitionId,
-
- /// Partition hash ID, if stored in the database.
- partition_hash_id: Option<PartitionHashId>,
+ id: TransitionPartitionId,
/// Count of persisted Parquet files for this partition by this ingester instance.
completed_persistence_count: u64,
@@ -24,24 +21,18 @@ pub(crate) struct PartitionResponse {
impl PartitionResponse {
pub(crate) fn new(
data: Vec<RecordBatch>,
- id: PartitionId,
- partition_hash_id: Option<PartitionHashId>,
+ id: TransitionPartitionId,
completed_persistence_count: u64,
) -> Self {
Self {
batches: data,
id,
- partition_hash_id,
completed_persistence_count,
}
}
- pub(crate) fn id(&self) -> PartitionId {
- self.id
- }
-
- pub(crate) fn partition_hash_id(&self) -> Option<&PartitionHashId> {
- self.partition_hash_id.as_ref()
+ pub(crate) fn id(&self) -> &TransitionPartitionId {
+ &self.id
}
pub(crate) fn completed_persistence_count(&self) -> u64 {
diff --git a/ingester/src/query/result_instrumentation.rs b/ingester/src/query/result_instrumentation.rs
index 50c0abf636..dafff05190 100644
--- a/ingester/src/query/result_instrumentation.rs
+++ b/ingester/src/query/result_instrumentation.rs
@@ -332,8 +332,7 @@ where
*this.partition_count += 1;
// Extract all the fields of the PartitionResponse
- let id = p.id();
- let hash_id = p.partition_hash_id().cloned();
+ let id = p.id().clone();
let persist_count = p.completed_persistence_count();
// And wrap the underlying stream of RecordBatch for this
@@ -346,12 +345,7 @@ where
this.record_batch_count
.fetch_add(data.len(), Ordering::Relaxed);
- Poll::Ready(Some(PartitionResponse::new(
- data,
- id,
- hash_id,
- persist_count,
- )))
+ Poll::Ready(Some(PartitionResponse::new(data, id, persist_count)))
}
Poll::Ready(None) => {
// Record the wall clock timestamp of the stream end.
@@ -435,12 +429,10 @@ mod tests {
make_batch, make_partition_stream,
query::mock_query_exec::MockQueryExec,
test_util::{
- ARBITRARY_CATALOG_PARTITION_ID, ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_HASH_ID,
- ARBITRARY_TABLE_ID,
+ ARBITRARY_NAMESPACE_ID, ARBITRARY_TABLE_ID, ARBITRARY_TRANSITION_PARTITION_ID,
},
};
use arrow::array::{Float32Array, Int64Array};
- use data_types::PartitionHashId;
use futures::{stream, StreamExt};
use iox_time::MockProvider;
use metric::{assert_histogram, Attributes};
@@ -457,8 +449,7 @@ mod tests {
// Construct a stream with no batches.
let stream = PartitionStream::new(stream::iter([PartitionResponse::new(
vec![],
- ARBITRARY_CATALOG_PARTITION_ID,
- Some(ARBITRARY_PARTITION_HASH_ID.clone()),
+ ARBITRARY_TRANSITION_PARTITION_ID.clone(),
42,
)]));
diff --git a/ingester/src/query_adaptor.rs b/ingester/src/query_adaptor.rs
index 34895513f6..70c30a5e94 100644
--- a/ingester/src/query_adaptor.rs
+++ b/ingester/src/query_adaptor.rs
@@ -88,10 +88,10 @@ impl QueryAdaptor {
self.data
}
- /// Returns the partition ID from which the data this [`QueryAdaptor`] was
+ /// Returns the partition identifier from which the data this [`QueryAdaptor`] was
/// sourced from.
- pub(crate) fn partition_id(&self) -> PartitionId {
- self.partition_id
+ pub(crate) fn partition_id(&self) -> &TransitionPartitionId {
+ &self.transition_partition_id
}
/// Number of rows, useful for building stats
diff --git a/ingester/src/server/grpc/query.rs b/ingester/src/server/grpc/query.rs
index 0b8fd037f6..f1a1c3fe5f 100644
--- a/ingester/src/server/grpc/query.rs
+++ b/ingester/src/server/grpc/query.rs
@@ -6,7 +6,7 @@ use arrow_flight::{
FlightData, FlightDescriptor, FlightInfo, HandshakeRequest, HandshakeResponse, PutResult,
SchemaResult, Ticket,
};
-use data_types::{NamespaceId, PartitionHashId, PartitionId, TableId};
+use data_types::{NamespaceId, TableId, TransitionPartitionId};
use flatbuffers::FlatBufferBuilder;
use futures::{Stream, StreamExt, TryStreamExt};
use ingester_query_grpc::influxdata::iox::ingester::v1 as proto;
@@ -303,10 +303,8 @@ where
/// Encode the partition information as a None flight data with meatadata
fn encode_partition(
- // Partition ID.
- partition_id: PartitionId,
- // Partition hash ID.
- partition_hash_id: Option<PartitionHashId>,
+ // Partition identifier.
+ partition_id: TransitionPartitionId,
// Count of persisted Parquet files for the [`PartitionData`] instance this
// [`PartitionResponse`] was generated from.
//
@@ -316,9 +314,17 @@ fn encode_partition(
ingester_id: IngesterId,
) -> Result<FlightData, FlightError> {
let mut bytes = bytes::BytesMut::new();
+
+ let (partition_id, partition_hash_id) = match partition_id {
+ TransitionPartitionId::Deterministic(hash_id) => {
+ (None, Some(hash_id.as_bytes().to_owned()))
+ }
+ TransitionPartitionId::Deprecated(partition_id) => (Some(partition_id.get()), None),
+ };
+
let app_metadata = proto::IngesterQueryResponseMetadata {
- partition_id: Some(partition_id.get()),
- partition_hash_id: partition_hash_id.map(|hash_id| hash_id.as_bytes().to_owned()),
+ partition_id,
+ partition_hash_id,
ingester_uuid: ingester_id.to_string(),
completed_persistence_count,
};
@@ -352,18 +358,12 @@ fn encode_response(
frame_encoding_duration_metric: Arc<DurationHistogram>,
) -> impl Stream<Item = Result<FlightData, FlightError>> {
response.into_partition_stream().flat_map(move |partition| {
- let partition_id = partition.id();
- let partition_hash_id = partition.partition_hash_id().cloned();
+ let partition_id = partition.id().clone();
let completed_persistence_count = partition.completed_persistence_count();
// prefix payload data w/ metadata for that particular partition
let head = futures::stream::once(async move {
- encode_partition(
- partition_id,
- partition_hash_id,
- completed_persistence_count,
- ingester_id,
- )
+ encode_partition(partition_id, completed_persistence_count, ingester_id)
});
// An output vector of FlightDataEncoder streams, each entry stream with
@@ -402,25 +402,24 @@ mod tests {
mock_query_exec::MockQueryExec, partition_response::PartitionResponse,
response::PartitionStream,
},
- test_util::ARBITRARY_PARTITION_HASH_ID,
+ test_util::{ARBITRARY_PARTITION_HASH_ID, ARBITRARY_TRANSITION_PARTITION_ID},
};
use arrow::array::{Float64Array, Int32Array};
use arrow_flight::decode::{DecodedPayload, FlightRecordBatchStream};
use assert_matches::assert_matches;
use bytes::Bytes;
+ use data_types::PartitionId;
use tonic::Code;
#[tokio::test]
- async fn sends_partition_hash_id_if_present() {
+ async fn sends_only_partition_hash_id_if_present() {
let ingester_id = IngesterId::new();
- // let partition_hash_id = PartitionHashId::new(TableId::new(3), &ARBITRARY_PARTITION_KEY);
let flight = FlightService::new(
MockQueryExec::default().with_result(Ok(QueryResponse::new(PartitionStream::new(
futures::stream::iter([PartitionResponse::new(
vec![],
- PartitionId::new(2),
- Some(ARBITRARY_PARTITION_HASH_ID.clone()),
+ ARBITRARY_TRANSITION_PARTITION_ID.clone(),
42,
)]),
)))),
@@ -447,7 +446,7 @@ mod tests {
let md_actual =
proto::IngesterQueryResponseMetadata::decode(flight_data[0].app_metadata()).unwrap();
let md_expected = proto::IngesterQueryResponseMetadata {
- partition_id: Some(2),
+ partition_id: None,
partition_hash_id: Some(ARBITRARY_PARTITION_HASH_ID.as_bytes().to_vec()),
ingester_uuid: ingester_id.to_string(),
completed_persistence_count: 42,
@@ -462,8 +461,7 @@ mod tests {
MockQueryExec::default().with_result(Ok(QueryResponse::new(PartitionStream::new(
futures::stream::iter([PartitionResponse::new(
vec![],
- PartitionId::new(2),
- None,
+ TransitionPartitionId::Deprecated(PartitionId::new(2)),
42,
)]),
)))),
@@ -562,8 +560,7 @@ mod tests {
batch3.clone(),
batch4.clone(),
],
- PartitionId::new(2),
- partition_hash_id.clone(),
+ ARBITRARY_TRANSITION_PARTITION_ID.clone(),
42,
)]),
)))),
@@ -591,7 +588,7 @@ mod tests {
let md_actual =
proto::IngesterQueryResponseMetadata::decode(flight_data[0].app_metadata()).unwrap();
let md_expected = proto::IngesterQueryResponseMetadata {
- partition_id: Some(2),
+ partition_id: None,
partition_hash_id: partition_hash_id.map(|hash_id| hash_id.as_bytes().to_vec()),
ingester_uuid: ingester_id.to_string(),
completed_persistence_count: 42,
diff --git a/ingester/src/test_util.rs b/ingester/src/test_util.rs
index 7003c0749c..3d0e4b9523 100644
--- a/ingester/src/test_util.rs
+++ b/ingester/src/test_util.rs
@@ -256,6 +256,7 @@ macro_rules! make_partition_stream {
query::{response::PartitionStream, partition_response::PartitionResponse},
test_util::ARBITRARY_PARTITION_KEY,
};
+ use data_types::{PartitionHashId, TableId, TransitionPartitionId};
use futures::stream;
PartitionStream::new(stream::iter([
@@ -274,14 +275,7 @@ macro_rules! make_partition_stream {
PartitionResponse::new(
batches,
- // Using the $id as both the PartitionId and the TableId in the
- // PartitionHashId is a temporary way to reduce duplication in tests where
- // the important part is which batches are in the same partition and which
- // batches are in a different partition, not what the actual identifier
- // values are. This will go away when the ingester no longer sends
- // PartitionIds.
- data_types::PartitionId::new($id),
- Some(
+ TransitionPartitionId::Deterministic(
PartitionHashId::new(
TableId::new($id),
&*ARBITRARY_PARTITION_KEY
|
b6286767b0ba21bd5cc24c1719b2a47ac490fd40
|
Carol (Nichols || Goulding)
|
2022-11-16 16:32:18
|
Validating the schema in ingester tests isn't necessary
|
The router validates schemas; schema validation shouldn't be tested in
the ingester
| null |
fix: Validating the schema in ingester tests isn't necessary
The router validates schemas; schema validation shouldn't be tested in
the ingester
|
diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs
index 6262390c42..90d816d9ba 100644
--- a/ingester/src/data/namespace.rs
+++ b/ingester/src/data/namespace.rs
@@ -328,13 +328,13 @@ mod tests {
};
use assert_matches::assert_matches;
use data_types::{
- ColumnId, ColumnSet, CompactionLevel, NamespaceSchema, ParquetFileParams, PartitionId,
+ ColumnId, ColumnSet, CompactionLevel, ParquetFileParams, PartitionId,
PartitionKey, ShardIndex, Timestamp,
};
- use iox_catalog::{interface::Catalog, mem::MemCatalog, validate_or_insert_schema};
+ use iox_catalog::{interface::Catalog, mem::MemCatalog};
use iox_time::SystemProvider;
use metric::{Attributes, Metric, MetricObserver, Observation};
- use std::{ops::DerefMut, sync::Arc, time::Duration};
+ use std::{sync::Arc, time::Duration};
use uuid::Uuid;
const SHARD_INDEX: ShardIndex = ShardIndex::new(24);
@@ -431,7 +431,6 @@ mod tests {
.create("foo", topic.id, query_pool.id)
.await
.unwrap();
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let w1 = make_write_op(
&PartitionKey::from("1970-01-01"),
@@ -452,11 +451,6 @@ mod tests {
"test_table foo=1 10",
);
- let _ = validate_or_insert_schema(w1.tables(), &schema, repos.deref_mut())
- .await
- .unwrap()
- .unwrap();
-
// create some persisted state
let partition = repos
.partitions()
|
232eee059fc36d8ead3b6e454b9efc9dc008274b
|
Andrew Lamb
|
2023-08-10 09:54:52
|
Update DataFusion (#8460)
|
* chore: Update DataFusion
* chore: update for API changes
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update DataFusion (#8460)
* chore: Update DataFusion
* chore: update for API changes
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index a59460e7c1..8c4158050b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1389,7 +1389,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"ahash",
"arrow",
@@ -1437,7 +1437,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"arrow",
"arrow-array",
@@ -1451,11 +1451,13 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
+ "arrow",
"dashmap",
"datafusion-common",
"datafusion-expr",
+ "futures",
"hashbrown 0.14.0",
"log",
"object_store",
@@ -1468,7 +1470,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"ahash",
"arrow",
@@ -1482,7 +1484,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"arrow",
"async-trait",
@@ -1499,7 +1501,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"ahash",
"arrow",
@@ -1533,7 +1535,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"arrow",
"chrono",
@@ -1547,7 +1549,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=27c7ae8978205dfebe7a96d6c1e28779df670bc2#27c7ae8978205dfebe7a96d6c1e28779df670bc2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=161c6d32824fc87307341f942ffad7b4d452c82f#161c6d32824fc87307341f942ffad7b4d452c82f"
dependencies = [
"arrow",
"arrow-schema",
diff --git a/Cargo.toml b/Cargo.toml
index 49401bbf59..841481664d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,8 +121,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "45.0.0" }
arrow-flight = { version = "45.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "27c7ae8978205dfebe7a96d6c1e28779df670bc2", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev = "27c7ae8978205dfebe7a96d6c1e28779df670bc2" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "161c6d32824fc87307341f942ffad7b4d452c82f", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev = "161c6d32824fc87307341f942ffad7b4d452c82f" }
hashbrown = { version = "0.14.0" }
object_store = { version = "0.6.0" }
diff --git a/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs b/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs
index d9ff701311..42b3ebd631 100644
--- a/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs
+++ b/iox_query/src/physical_optimizer/sort/push_sort_through_union.rs
@@ -166,7 +166,7 @@ impl TreeNodeRewriter for SortRewriter {
Arc::clone(repartition_exec.input()),
repartition_exec.output_partitioning(),
)?
- .with_preserve_order(),
+ .with_preserve_order(true),
))
} else if let Some(union_exec) = plan.as_any().downcast_ref::<UnionExec>() {
// Any children of the UnionExec that are not already sorted,
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 84e69ceb52..c473d5732d 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -28,9 +28,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "27c7ae8978205dfebe7a96d6c1e28779df670bc2" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "27c7ae8978205dfebe7a96d6c1e28779df670bc2", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "27c7ae8978205dfebe7a96d6c1e28779df670bc2", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "161c6d32824fc87307341f942ffad7b4d452c82f" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "161c6d32824fc87307341f942ffad7b4d452c82f", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "161c6d32824fc87307341f942ffad7b4d452c82f", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1", features = ["serde"] }
fixedbitset = { version = "0.4" }
|
b883c7c55474834a5bd7ec3b3469167e80045af1
|
Marco Neumann
|
2023-07-25 11:56:55
|
manual cargo update (#8328)
|
* chore: manual cargo update
Dependabot seemed to have fallen behind a bit.
```console
β― cargo update
Updating crates.io index
Updating git repository `https://github.com/apache/arrow-datafusion.git`
Updating git repository `https://github.com/mkmik/heappy`
Updating allocator-api2 v0.2.15 -> v0.2.16
Updating anyhow v1.0.71 -> v1.0.72
Updating async-compression v0.4.0 -> v0.4.1
Updating axum v0.6.18 -> v0.6.19
Updating blake3 v1.4.0 -> v1.4.1
Updating bstr v1.5.0 -> v1.6.0
Updating constant_time_eq v0.2.6 -> v0.3.0
Updating cpufeatures v0.2.8 -> v0.2.9
Updating dashmap v5.4.0 -> v5.5.0
Updating equivalent v1.0.0 -> v1.0.1
Updating http-range-header v0.3.0 -> v0.3.1
Updating hyper-rustls v0.24.0 -> v0.24.1
Updating itoa v1.0.7 -> v1.0.9
Updating num v0.4.0 -> v0.4.1
Updating pest v2.7.0 -> v2.7.1
Updating pest_derive v2.7.0 -> v2.7.1
Updating pest_generator v2.7.0 -> v2.7.1
Updating pest_meta v2.7.0 -> v2.7.1
Updating proc-macro2 v1.0.63 -> v1.0.66
Updating quote v1.0.29 -> v1.0.32
Updating rustversion v1.0.12 -> v1.0.14
Updating ryu v1.0.13 -> v1.0.15
Updating semver v1.0.17 -> v1.0.18
Updating seq-macro v0.3.3 -> v0.3.5
Updating stringprep v0.1.2 -> v0.1.3
Updating strum_macros v0.25.0 -> v0.25.1
Updating symbolic-common v12.2.0 -> v12.3.0
Updating symbolic-demangle v12.2.0 -> v12.3.0
Updating syn v2.0.26 -> v2.0.27
Updating toml_edit v0.19.12 -> v0.19.14
Updating ucd-trie v0.1.5 -> v0.1.6
Updating unicode-ident v1.0.9 -> v1.0.11
Updating winnow v0.4.7 -> v0.5.1
```
* chore: Run cargo hakari tasks
---------
|
Co-authored-by: CircleCI[bot] <[email protected]>
|
chore: manual cargo update (#8328)
* chore: manual cargo update
Dependabot seemed to have fallen behind a bit.
```console
β― cargo update
Updating crates.io index
Updating git repository `https://github.com/apache/arrow-datafusion.git`
Updating git repository `https://github.com/mkmik/heappy`
Updating allocator-api2 v0.2.15 -> v0.2.16
Updating anyhow v1.0.71 -> v1.0.72
Updating async-compression v0.4.0 -> v0.4.1
Updating axum v0.6.18 -> v0.6.19
Updating blake3 v1.4.0 -> v1.4.1
Updating bstr v1.5.0 -> v1.6.0
Updating constant_time_eq v0.2.6 -> v0.3.0
Updating cpufeatures v0.2.8 -> v0.2.9
Updating dashmap v5.4.0 -> v5.5.0
Updating equivalent v1.0.0 -> v1.0.1
Updating http-range-header v0.3.0 -> v0.3.1
Updating hyper-rustls v0.24.0 -> v0.24.1
Updating itoa v1.0.7 -> v1.0.9
Updating num v0.4.0 -> v0.4.1
Updating pest v2.7.0 -> v2.7.1
Updating pest_derive v2.7.0 -> v2.7.1
Updating pest_generator v2.7.0 -> v2.7.1
Updating pest_meta v2.7.0 -> v2.7.1
Updating proc-macro2 v1.0.63 -> v1.0.66
Updating quote v1.0.29 -> v1.0.32
Updating rustversion v1.0.12 -> v1.0.14
Updating ryu v1.0.13 -> v1.0.15
Updating semver v1.0.17 -> v1.0.18
Updating seq-macro v0.3.3 -> v0.3.5
Updating stringprep v0.1.2 -> v0.1.3
Updating strum_macros v0.25.0 -> v0.25.1
Updating symbolic-common v12.2.0 -> v12.3.0
Updating symbolic-demangle v12.2.0 -> v12.3.0
Updating syn v2.0.26 -> v2.0.27
Updating toml_edit v0.19.12 -> v0.19.14
Updating ucd-trie v0.1.5 -> v0.1.6
Updating unicode-ident v1.0.9 -> v1.0.11
Updating winnow v0.4.7 -> v0.5.1
```
* chore: Run cargo hakari tasks
---------
Co-authored-by: CircleCI[bot] <[email protected]>
|
diff --git a/Cargo.lock b/Cargo.lock
index fc137884c5..dbff294ff2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -56,9 +56,9 @@ dependencies = [
[[package]]
name = "allocator-api2"
-version = "0.2.15"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9"
+checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
[[package]]
name = "android-tzdata"
@@ -132,9 +132,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.71"
+version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
+checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
[[package]]
name = "arrayref"
@@ -450,9 +450,9 @@ dependencies = [
[[package]]
name = "async-compression"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11"
+checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6"
dependencies = [
"bzip2",
"flate2",
@@ -485,7 +485,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -496,7 +496,7 @@ checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -537,9 +537,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
-version = "0.6.18"
+version = "0.6.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39"
+checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c"
dependencies = [
"async-trait",
"axum-core",
@@ -650,9 +650,9 @@ dependencies = [
[[package]]
name = "blake3"
-version = "1.4.0"
+version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "729b71f35bd3fa1a4c86b85d32c8b9069ea7fe14f7a53cfabb65f62d4265b888"
+checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5"
dependencies = [
"arrayref",
"arrayvec",
@@ -694,13 +694,12 @@ dependencies = [
[[package]]
name = "bstr"
-version = "1.5.0"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5"
+checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05"
dependencies = [
"memchr",
- "once_cell",
- "regex-automata 0.1.10",
+ "regex-automata 0.3.3",
"serde",
]
@@ -907,7 +906,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -1134,9 +1133,9 @@ dependencies = [
[[package]]
name = "constant_time_eq"
-version = "0.2.6"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6"
+checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
[[package]]
name = "core-foundation-sys"
@@ -1155,9 +1154,9 @@ dependencies = [
[[package]]
name = "cpufeatures"
-version = "0.2.8"
+version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c"
+checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1"
dependencies = [
"libc",
]
@@ -1334,12 +1333,12 @@ dependencies = [
[[package]]
name = "dashmap"
-version = "5.4.0"
+version = "5.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc"
+checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d"
dependencies = [
"cfg-if",
- "hashbrown 0.12.3",
+ "hashbrown 0.14.0",
"lock_api",
"once_cell",
"parking_lot_core",
@@ -1461,7 +1460,7 @@ dependencies = [
"lazy_static",
"sqlparser 0.35.0",
"strum 0.25.0",
- "strum_macros 0.25.0",
+ "strum_macros 0.25.1",
]
[[package]]
@@ -1667,9 +1666,9 @@ dependencies = [
[[package]]
name = "equivalent"
-version = "1.0.0"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
@@ -1924,7 +1923,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -2278,9 +2277,9 @@ dependencies = [
[[package]]
name = "http-range-header"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"
+checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f"
[[package]]
name = "httparse"
@@ -2326,10 +2325,11 @@ dependencies = [
[[package]]
name = "hyper-rustls"
-version = "0.24.0"
+version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7"
+checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97"
dependencies = [
+ "futures-util",
"http",
"hyper",
"rustls",
@@ -3173,9 +3173,9 @@ dependencies = [
[[package]]
name = "itoa"
-version = "1.0.7"
+version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0aa48fab2893d8a49caa94082ae8488f4e1050d73b367881dcd2198f4199fd8"
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "jobserver"
@@ -3626,9 +3626,9 @@ dependencies = [
[[package]]
name = "num"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
+checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af"
dependencies = [
"num-bigint",
"num-complex",
@@ -4033,9 +4033,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]]
name = "pest"
-version = "2.7.0"
+version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f73935e4d55e2abf7f130186537b19e7a4abc886a0252380b59248af473a3fc9"
+checksum = "0d2d1d55045829d65aad9d389139882ad623b33b904e7c9f1b10c5b8927298e5"
dependencies = [
"thiserror",
"ucd-trie",
@@ -4043,9 +4043,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.7.0"
+version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aef623c9bbfa0eedf5a0efba11a5ee83209c326653ca31ff019bec3a95bfff2b"
+checksum = "5f94bca7e7a599d89dea5dfa309e217e7906c3c007fb9c3299c40b10d6a315d3"
dependencies = [
"pest",
"pest_generator",
@@ -4053,22 +4053,22 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.7.0"
+version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190"
+checksum = "99d490fe7e8556575ff6911e45567ab95e71617f43781e5c05490dc8d75c965c"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
name = "pest_meta"
-version = "2.7.0"
+version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a01f71cb40bd8bb94232df14b946909e14660e33fc05db3e50ae2a82d7ea0ca0"
+checksum = "2674c66ebb4b4d9036012091b537aae5878970d6999f81a265034d85b136b341"
dependencies = [
"once_cell",
"pest",
@@ -4140,7 +4140,7 @@ checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -4292,9 +4292,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
-version = "1.0.63"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
dependencies = [
"unicode-ident",
]
@@ -4480,9 +4480,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.29"
+version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
+checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965"
dependencies = [
"proc-macro2",
]
@@ -4823,9 +4823,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.12"
+version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06"
+checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "rustyline"
@@ -4850,9 +4850,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.13"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
[[package]]
name = "same-file"
@@ -4893,15 +4893,15 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.17"
+version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
+checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918"
[[package]]
name = "seq-macro"
-version = "0.3.3"
+version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6b44e8fc93a14e66336d230954dda83d18b4605ccace8fe09bc7514a71ad0bc"
+checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
@@ -4920,7 +4920,7 @@ checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -5570,9 +5570,9 @@ checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb"
[[package]]
name = "stringprep"
-version = "0.1.2"
+version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1"
+checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da"
dependencies = [
"unicode-bidi",
"unicode-normalization",
@@ -5596,7 +5596,7 @@ version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
dependencies = [
- "strum_macros 0.25.0",
+ "strum_macros 0.25.1",
]
[[package]]
@@ -5614,15 +5614,15 @@ dependencies = [
[[package]]
name = "strum_macros"
-version = "0.25.0"
+version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe9f3bd7d2e45dcc5e265fbb88d6513e4747d8ef9444cf01a533119bce28a157"
+checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -5633,9 +5633,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "symbolic-common"
-version = "12.2.0"
+version = "12.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38f7afd8bcd36190409e6b71d89928f7f09d918a7aa3460d847bc49a538d672e"
+checksum = "167a4ffd7c35c143fd1030aa3c2caf76ba42220bd5a6b5f4781896434723b8c3"
dependencies = [
"debugid",
"memmap2",
@@ -5645,9 +5645,9 @@ dependencies = [
[[package]]
name = "symbolic-demangle"
-version = "12.2.0"
+version = "12.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec64922563a36e3fe686b6d99f06f25dacad2a202ac7502ed642930a188fb20a"
+checksum = "e378c50e80686c1c5c205674e1f86a2858bec3d2a7dfdd690331a8a19330f293"
dependencies = [
"cpp_demangle",
"rustc-demangle",
@@ -5667,9 +5667,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.26"
+version = "2.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970"
+checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0"
dependencies = [
"proc-macro2",
"quote",
@@ -5792,7 +5792,7 @@ checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -5921,7 +5921,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -5993,9 +5993,9 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.19.12"
+version = "0.19.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78"
+checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a"
dependencies = [
"indexmap 2.0.0",
"serde",
@@ -6194,7 +6194,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
]
[[package]]
@@ -6309,9 +6309,9 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "ucd-trie"
-version = "0.1.5"
+version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81"
+checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9"
[[package]]
name = "unarray"
@@ -6327,9 +6327,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
[[package]]
name = "unicode-ident"
-version = "1.0.9"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
[[package]]
name = "unicode-normalization"
@@ -6507,7 +6507,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
"wasm-bindgen-shared",
]
@@ -6541,7 +6541,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.26",
+ "syn 2.0.27",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -6803,9 +6803,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "winnow"
-version = "0.4.7"
+version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448"
+checksum = "25b5872fa2e10bd067ae946f927e726d7d603eaeb6e02fa6a350e0722d2b8c11"
dependencies = [
"memchr",
]
@@ -6897,7 +6897,7 @@ dependencies = [
"sqlx-postgres",
"sqlx-sqlite",
"syn 1.0.109",
- "syn 2.0.26",
+ "syn 2.0.27",
"thrift",
"tokio",
"tokio-stream",
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 5d2b5ae333..217a01eb44 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -67,7 +67,7 @@ prost-types = { version = "0.11" }
rand = { version = "0.8", features = ["small_rng"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
regex = { version = "1" }
-regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
+regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
regex-syntax = { version = "0.7" }
reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls", "stream"] }
ring = { version = "0.16", features = ["std"] }
@@ -138,7 +138,7 @@ prost-types = { version = "0.11" }
rand = { version = "0.8", features = ["small_rng"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
regex = { version = "1" }
-regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
+regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
regex-syntax = { version = "0.7" }
ring = { version = "0.16", features = ["std"] }
rustls = { version = "0.21", default-features = false, features = ["dangerous_configuration", "logging", "tls12"] }
|
12636ca7597e28d036a5f7b6cfa5228fd2688f3a
|
Paul Dix
|
2024-03-25 15:40:21
|
loader error with single wal file (#24814)
|
Fixes a bug where the loader would error out if there was a wal segment file for a previous segment that hadn't been persisted, and a new wal file had to be created for the new open segment. This would show up as an error if you started the server and then stopped and restarted it without writing any data.
| null |
fix: loader error with single wal file (#24814)
Fixes a bug where the loader would error out if there was a wal segment file for a previous segment that hadn't been persisted, and a new wal file had to be created for the new open segment. This would show up as an error if you started the server and then stopped and restarted it without writing any data.
|
diff --git a/influxdb3_write/src/write_buffer/buffer_segment.rs b/influxdb3_write/src/write_buffer/buffer_segment.rs
index e8f9e7b6a2..405bee1158 100644
--- a/influxdb3_write/src/write_buffer/buffer_segment.rs
+++ b/influxdb3_write/src/write_buffer/buffer_segment.rs
@@ -70,11 +70,7 @@ impl OpenBufferSegment {
}
}
- #[allow(dead_code)]
- pub fn start_time_matches(&self, t: Time) -> bool {
- self.segment_range.start_time == t
- }
-
+ #[cfg(test)]
pub fn segment_id(&self) -> SegmentId {
self.segment_id
}
diff --git a/influxdb3_write/src/write_buffer/loader.rs b/influxdb3_write/src/write_buffer/loader.rs
index 8b4c5be148..7c9efdfdd1 100644
--- a/influxdb3_write/src/write_buffer/loader.rs
+++ b/influxdb3_write/src/write_buffer/loader.rs
@@ -52,12 +52,15 @@ where
let next_segment_range = current_segment_range.next();
let mut open_segments = Vec::new();
+ let mut max_segment_id = last_persisted_segment_id;
if let Some(wal) = wal {
// read any segments that don't show up in the list of persisted segments
let wal_segments = wal.segment_files()?;
for segment_file in wal_segments {
+ max_segment_id = max_segment_id.max(segment_file.segment_id);
+
// if persisted segments is empty, load all segments from the wal, otherwise
// only load segments that haven't been persisted yet
if segment_file.segment_id <= last_persisted_segment_id
@@ -93,7 +96,8 @@ where
if open_segments.is_empty() {
// ensure that we open up a segment for the "now" period of time
- let current_segment_id = last_persisted_segment_id.next();
+ let current_segment_id = max_segment_id.next();
+ max_segment_id = current_segment_id;
let current_segment = OpenBufferSegment::new(
current_segment_id,
@@ -109,6 +113,7 @@ where
} else {
// ensure that we open up a segment for the "now" period of time
let current_segment_id = last_persisted_segment_id.next();
+ max_segment_id = current_segment_id;
let current_segment = OpenBufferSegment::new(
current_segment_id,
@@ -122,22 +127,9 @@ where
open_segments.push(current_segment);
};
- let last_segment_id = open_segments
- .iter()
- .map(|s| s.segment_id())
- .max()
- .unwrap_or(SegmentId::new(0))
- .max(last_persisted_segment_id)
- .max(
- persisted_segments
- .last()
- .map(|s| s.segment_id)
- .unwrap_or(SegmentId::new(0)),
- );
-
Ok(LoadedState {
catalog,
- last_segment_id,
+ last_segment_id: max_segment_id,
open_segments,
persisting_buffer_segments,
persisted_segments,
@@ -633,4 +625,49 @@ mod tests {
assert_eq!(loaded_state.last_segment_id, SegmentId::new(2));
}
+
+ #[tokio::test]
+ async fn loads_with_persisting_wal_file_and_no_open_segment() {
+ let object_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new());
+ let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store)));
+ let dir = test_helpers::tmp_dir().unwrap().into_path();
+ let wal = Arc::new(WalImpl::new(dir.clone()).unwrap());
+
+ let LoadedState {
+ mut open_segments, ..
+ } = load_starting_state(
+ Arc::clone(&persister),
+ Some(Arc::clone(&wal)),
+ Time::from_timestamp_nanos(0),
+ SegmentDuration::new_5m(),
+ )
+ .await
+ .unwrap();
+
+ let current_segment = open_segments.pop().unwrap();
+ let segment_id = current_segment.segment_id();
+ let next_segment_id = segment_id.next();
+
+ let mut loaded_state = load_starting_state(
+ persister,
+ Some(wal),
+ Time::from_timestamp(360, 0).unwrap(),
+ SegmentDuration::new_5m(),
+ )
+ .await
+ .unwrap();
+
+ assert_eq!(
+ loaded_state
+ .persisting_buffer_segments
+ .pop()
+ .unwrap()
+ .segment_id,
+ segment_id
+ );
+ assert_eq!(
+ loaded_state.open_segments.pop().unwrap().segment_id(),
+ next_segment_id
+ );
+ }
}
|
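A minimal, self-contained sketch of the id-selection rule the fix above moves to (the `SegmentId` type and helper below are hypothetical stand-ins, not the crate's real loader API): the next open segment id is derived from the maximum of the last persisted id and every segment id found in the WAL, so an unpersisted WAL segment can no longer collide with the newly opened one.

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct SegmentId(u64);

impl SegmentId {
    fn next(self) -> Self {
        SegmentId(self.0 + 1)
    }
}

fn next_open_segment_id(last_persisted: SegmentId, wal_segment_ids: &[SegmentId]) -> SegmentId {
    // Track the highest id seen anywhere, persisted or not, before opening a new segment.
    let mut max_seen = last_persisted;
    for id in wal_segment_ids {
        max_seen = max_seen.max(*id);
    }
    max_seen.next()
}

fn main() {
    // One unpersisted WAL segment (id 1) exists and nothing has been persisted yet (id 0):
    // the new open segment must get id 2 rather than reusing id 1.
    assert_eq!(next_open_segment_id(SegmentId(0), &[SegmentId(1)]), SegmentId(2));
}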
f026d546b0753d5d666dac3dec50bdc6a577a666
|
Stuart Carnie
|
2023-05-05 13:35:32
|
Refactor `select_statement_to_plan`
|
This is an improvement over the previous version, and prepares the
planner for implementing subqueries and passing schema to the
`project_select` function.
| null |
chore: Refactor `select_statement_to_plan`
This is an improvement over the previous version, and prepares the
planner for implementing subqueries and passing schema to the
`project_select` function.
|
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index b87dd68681..17f23bd4ca 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -73,7 +73,7 @@ use schema::{
InfluxColumnType, InfluxFieldType, Schema, INFLUXQL_MEASUREMENT_COLUMN_NAME,
INFLUXQL_METADATA_KEY,
};
-use std::collections::{HashSet, VecDeque};
+use std::collections::HashSet;
use std::fmt::Debug;
use std::iter;
use std::ops::{Bound, ControlFlow, Deref, Range};
@@ -272,8 +272,6 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Create a [`LogicalPlan`] from the specified InfluxQL `SELECT` statement.
fn select_statement_to_plan(&self, select: &Select) -> Result<LogicalPlan> {
- let mut plans = self.plan_from_tables(&select.from)?;
-
let ctx = Context::new(select_statement_info(select)?)
.with_timezone(select.timezone)
.with_group_by_fill(select);
@@ -338,65 +336,37 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
fields.extend(fields_no_time.iter().cloned());
- // Build the first non-empty plan
let plan = {
- loop {
- match plans.pop_front() {
- Some((plan, proj)) => match self.project_select(
- &ctx,
+ let mut iter = select.from.iter();
+ let plan = match iter.next() {
+ Some(ds) => self.project_select(&ctx, ds, select, &fields, &group_by_tag_set),
+ None => {
+ // empty result, but let's at least have all the strictly necessary metadata
+ let schema = Arc::new(ArrowSchema::new(vec![ArrowField::new(
+ INFLUXQL_MEASUREMENT_COLUMN_NAME,
+ (&InfluxColumnType::Tag).into(),
+ false,
+ )]));
+ let plan = LogicalPlan::EmptyRelation(EmptyRelation {
+ produce_one_row: false,
+ schema: schema.to_dfschema_ref()?,
+ });
+ let plan = plan_with_metadata(
plan,
- proj,
- select,
- &fields,
- &group_by_tag_set,
- )? {
- // Exclude any plans that produce no data, which is
- // consistent with InfluxQL.
- LogicalPlan::EmptyRelation(EmptyRelation {
- produce_one_row: false,
- ..
- }) => continue,
- plan => break plan,
- },
- None => {
- // empty result, but let's at least have all the strictly necessary metadata
- let schema = Arc::new(ArrowSchema::new(vec![ArrowField::new(
- INFLUXQL_MEASUREMENT_COLUMN_NAME,
- (&InfluxColumnType::Tag).into(),
- false,
- )]));
- let plan = LogicalPlan::EmptyRelation(EmptyRelation {
- produce_one_row: false,
- schema: schema.to_dfschema_ref()?,
- });
- let plan = plan_with_metadata(
- plan,
- &InfluxQlMetadata {
- measurement_column_index: MEASUREMENT_COLUMN_INDEX,
- tag_key_columns: vec![],
- },
- )?;
- return Ok(plan);
- }
+ &InfluxQlMetadata {
+ measurement_column_index: MEASUREMENT_COLUMN_INDEX,
+ tag_key_columns: vec![],
+ },
+ )?;
+ return Ok(plan);
}
- }
- };
+ }?;
- // UNION the remaining plans
- let plan = plans.into_iter().try_fold(plan, |prev, (next, proj)| {
- let next = self.project_select(&ctx, next, proj, select, &fields, &group_by_tag_set)?;
- if let LogicalPlan::EmptyRelation(EmptyRelation {
- produce_one_row: false,
- ..
- }) = next
- {
- // Exclude any plans that produce no data, which is
- // consistent with InfluxQL.
- Ok(prev)
- } else {
+ iter.try_fold(plan, |prev, ds| {
+ let next = self.project_select(&ctx, ds, select, &fields, &group_by_tag_set)?;
LogicalPlanBuilder::from(prev).union(next)?.build()
- }
- })?;
+ })?
+ };
let plan = plan_with_metadata(
plan,
@@ -454,15 +424,16 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
fn project_select(
&self,
ctx: &Context<'_>,
- input: LogicalPlan,
- proj: Vec<Expr>,
+ ds: &DataSource,
select: &Select,
fields: &[Field],
group_by_tag_set: &[&str],
) -> Result<LogicalPlan> {
- let schemas = Schemas::new(input.schema())?;
+ let (plan, proj) = self.plan_from_data_source(ds)?;
- let plan = self.plan_where_clause(ctx, &select.condition, input, &schemas)?;
+ let schemas = Schemas::new(plan.schema())?;
+
+ let plan = self.plan_where_clause(ctx, &select.condition, plan, &schemas)?;
// Transform InfluxQL AST field expressions to a list of DataFusion expressions.
let mut select_exprs = self.field_list_to_exprs(ctx, &plan, fields, &schemas)?;
@@ -1234,19 +1205,19 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Generate a list of logical plans for each of the tables references in the `FROM`
/// clause.
- fn plan_from_tables(&self, from: &[DataSource]) -> Result<VecDeque<(LogicalPlan, Vec<Expr>)>> {
- // A list of scans and their initial projections
- let mut table_projs = VecDeque::new();
- for ds in from.iter() {
- let Some(table_proj) = match ds {
- DataSource::Table(name) => self.create_table_ref(name),
- DataSource::Subquery(_) => error::not_implemented(
- "subquery in FROM clause",
- ),
- }? else { continue };
- table_projs.push_back(table_proj);
+ fn plan_from_data_source(&self, ds: &DataSource) -> Result<(LogicalPlan, Vec<Expr>)> {
+ match ds {
+ DataSource::Table(table_name) => {
+ // `rewrite_statement` guarantees the table should exist
+ let source = self.s.get_table_provider(table_name)?;
+ let table_ref = TableReference::bare(table_name.to_owned());
+ Ok((
+ LogicalPlanBuilder::scan(table_ref, source, None)?.build()?,
+ vec![lit_dict(table_name).alias(INFLUXQL_MEASUREMENT_COLUMN_NAME)],
+ ))
+ }
+ DataSource::Subquery(_) => error::not_implemented("subquery in FROM clause"),
}
- Ok(table_projs)
}
/// Create a [LogicalPlan] that refers to the specified `table_name`.
|
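As a rough illustration of the control flow the refactor above lands on (plan the first data source directly, then fold every remaining source into a UNION via `try_fold`), here is a toy, stand-alone version that uses strings in place of DataFusion logical plans; the function name and the "EMPTY" placeholder are invented for the sketch.

fn union_all(sources: &[&str]) -> Result<String, String> {
    let mut iter = sources.iter();
    // Plan the first source, or fall back to an empty relation when FROM is empty.
    let first = match iter.next() {
        Some(s) => format!("SCAN {s}"),
        None => return Ok("EMPTY".to_string()),
    };
    // Fold every remaining source into a UNION of the plan built so far.
    iter.try_fold(first, |prev, s| -> Result<String, String> {
        Ok(format!("({prev}) UNION (SCAN {s})"))
    })
}

fn main() {
    assert_eq!(union_all(&[]).unwrap(), "EMPTY");
    assert_eq!(union_all(&["cpu"]).unwrap(), "SCAN cpu");
    assert_eq!(
        union_all(&["cpu", "mem"]).unwrap(),
        "(SCAN cpu) UNION (SCAN mem)"
    );
}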
e4c12fa6a5e9b607673d545f0a4bce4232e6a2f5
|
Marco Neumann
|
2022-11-22 12:25:23
|
slice flight response batches (#6205)
|
* fix: slice flight response batches
Same as #6094 but for the Apache Flight interface.
Ref https://github.com/influxdata/idpe/issues/16073.
* refactor: use `RecordBatch::slice`
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
fix: slice flight response batches (#6205)
* fix: slice flight response batches
Same as #6094 but for the Apache Flight interface.
Ref https://github.com/influxdata/idpe/issues/16073.
* refactor: use `RecordBatch::slice`
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/arrow_util/src/optimize.rs b/arrow_util/src/optimize.rs
index 1efd634ac1..a111b305cd 100644
--- a/arrow_util/src/optimize.rs
+++ b/arrow_util/src/optimize.rs
@@ -186,6 +186,43 @@ pub fn optimize_schema(schema: &Schema) -> Schema {
Schema::new(fields)
}
+/// The size to which we limit our [`RecordBatch`] payloads.
+///
+/// We will slice up the returned [`RecordBatch]s (preserving order) to only produce objects of approximately
+/// this size (there's a bit of additional encoding overhead on top of that, but that should be OK).
+///
+/// This would normally be 4MB, but the size calculation for the record batches is rather inexact, so we set it to 2MB.
+const MAX_GRPC_RESPONSE_BATCH_SIZE: usize = 2097152;
+
+/// Split [`RecordBatch`] so it hopefully fits into a gRPC response.
+///
+/// Max size is controlled by [`MAX_GRPC_RESPONSE_BATCH_SIZE`].
+///
+/// Data is zero-copy sliced into batches.
+pub fn split_batch_for_grpc_response(batch: RecordBatch) -> Vec<RecordBatch> {
+ let size = batch
+ .columns()
+ .iter()
+ .map(|col| col.get_array_memory_size())
+ .sum::<usize>();
+
+ let n_batches = (size / MAX_GRPC_RESPONSE_BATCH_SIZE
+ + usize::from(size % MAX_GRPC_RESPONSE_BATCH_SIZE != 0))
+ .max(1);
+ let rows_per_batch = batch.num_rows() / n_batches;
+ let mut out = Vec::with_capacity(n_batches + 1);
+
+ let mut offset = 0;
+ while offset < batch.num_rows() {
+ let length = (offset + rows_per_batch).min(batch.num_rows() - offset);
+ out.push(batch.slice(offset, length));
+
+ offset += length;
+ }
+
+ out
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -193,8 +230,9 @@ mod tests {
use crate::assert_batches_eq;
use arrow::array::{
ArrayDataBuilder, DictionaryArray, Float64Array, Int32Array, StringArray, UInt32Array,
+ UInt8Array,
};
- use arrow::compute::concat;
+ use arrow::compute::{concat, concat_batches};
use arrow_flight::utils::flight_data_to_arrow_batch;
use datafusion::physical_plan::limit::truncate_batch;
use std::iter::FromIterator;
@@ -501,4 +539,29 @@ mod tests {
]);
assert_eq!(array, &expected)
}
+
+ #[test]
+ fn test_split_batch_for_grpc_response() {
+ // no split
+ let c = UInt32Array::from(vec![1, 2, 3, 4, 5, 6]);
+ let batch = RecordBatch::try_from_iter(vec![("a", Arc::new(c) as ArrayRef)])
+ .expect("cannot create record batch");
+ let split = split_batch_for_grpc_response(batch.clone());
+ assert_eq!(split.len(), 1);
+ assert_eq!(batch, split[0]);
+
+ // split once
+ let n_rows = MAX_GRPC_RESPONSE_BATCH_SIZE + 1;
+ assert!(n_rows % 2 == 1, "should be an odd number");
+ let c = UInt8Array::from((0..n_rows).map(|i| (i % 256) as u8).collect::<Vec<_>>());
+ let batch = RecordBatch::try_from_iter(vec![("a", Arc::new(c) as ArrayRef)])
+ .expect("cannot create record batch");
+ let split = split_batch_for_grpc_response(batch.clone());
+ assert_eq!(split.len(), 2);
+ assert_eq!(
+ split.iter().map(|batch| batch.num_rows()).sum::<usize>(),
+ n_rows
+ );
+ assert_eq!(concat_batches(&batch.schema(), &split).unwrap(), batch);
+ }
}
diff --git a/ingester/src/querier_handler.rs b/ingester/src/querier_handler.rs
index a4c6fdf6d9..d08b9d2e93 100644
--- a/ingester/src/querier_handler.rs
+++ b/ingester/src/querier_handler.rs
@@ -3,7 +3,7 @@
use std::{pin::Pin, sync::Arc};
use arrow::{array::new_null_array, error::ArrowError, record_batch::RecordBatch};
-use arrow_util::optimize::{optimize_record_batch, optimize_schema};
+use arrow_util::optimize::{optimize_record_batch, optimize_schema, split_batch_for_grpc_response};
use data_types::{NamespaceId, PartitionId, SequenceNumber, TableId};
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion_util::MemoryStream;
@@ -142,11 +142,22 @@ impl IngesterQueryResponse {
})
});
- let tail = snapshot.map(move |batch_res| match batch_res {
- Ok(batch) => Ok(FlatIngesterQueryResponse::RecordBatch {
- batch: optimize_record_batch(&batch, Arc::clone(&schema))?,
- }),
- Err(e) => Err(e),
+ let tail = snapshot.flat_map(move |batch_res| match batch_res {
+ Ok(batch) => {
+ match optimize_record_batch(&batch, Arc::clone(&schema)) {
+ Ok(batch) => futures::stream::iter(
+ split_batch_for_grpc_response(batch),
+ )
+ .map(|batch| {
+ Ok(FlatIngesterQueryResponse::RecordBatch { batch })
+ })
+ .boxed(),
+ Err(e) => {
+ futures::stream::once(async { Err(e) }).boxed()
+ }
+ }
+ }
+ Err(e) => futures::stream::once(async { Err(e) }).boxed(),
});
head.chain(tail).boxed()
diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs
index 3ab27e6804..b1cb3d6c56 100644
--- a/service_grpc_flight/src/lib.rs
+++ b/service_grpc_flight/src/lib.rs
@@ -6,7 +6,7 @@ use arrow_flight::{
Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo,
HandshakeRequest, HandshakeResponse, PutResult, SchemaAsIpc, SchemaResult, Ticket,
};
-use arrow_util::optimize::{optimize_record_batch, optimize_schema};
+use arrow_util::optimize::{optimize_record_batch, optimize_schema, split_batch_for_grpc_response};
use bytes::{Bytes, BytesMut};
use data_types::NamespaceNameError;
use datafusion::{error::DataFusionError, physical_plan::ExecutionPlan};
@@ -372,22 +372,24 @@ impl GetStream {
Ok(batch) => {
match optimize_record_batch(&batch, Arc::clone(&schema)) {
Ok(batch) => {
- let (flight_dictionaries, flight_batch) =
- arrow_flight::utils::flight_data_from_arrow_batch(
- &batch, &options,
- );
+ for batch in split_batch_for_grpc_response(batch) {
+ let (flight_dictionaries, flight_batch) =
+ arrow_flight::utils::flight_data_from_arrow_batch(
+ &batch, &options,
+ );
+
+ for dict in flight_dictionaries {
+ if tx.send(Ok(dict)).await.is_err() {
+ // receiver is gone
+ return;
+ }
+ }
- for dict in flight_dictionaries {
- if tx.send(Ok(dict)).await.is_err() {
+ if tx.send(Ok(flight_batch)).await.is_err() {
// receiver is gone
return;
}
}
-
- if tx.send(Ok(flight_batch)).await.is_err() {
- // receiver is gone
- return;
- }
}
Err(e) => {
// failure sending here is OK because we're cutting the stream anyways
|
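The splitting arithmetic above is easy to get wrong, so here is a small self-contained sketch of the same idea in plain Rust (no Arrow dependency; the constant and names are invented for illustration): estimate how many slices a byte budget requires, then hand out row ranges that cover the batch exactly.

const MAX_RESPONSE_BATCH_SIZE: usize = 2 * 1024 * 1024; // ~2 MiB budget per slice

/// Return (offset, len) row ranges that cover `num_rows`, sized so each slice's
/// share of `estimated_bytes` stays near the budget. Illustrative only.
fn split_ranges(estimated_bytes: usize, num_rows: usize) -> Vec<(usize, usize)> {
    // Round the number of slices up, and never go below one.
    let n_batches = (estimated_bytes / MAX_RESPONSE_BATCH_SIZE
        + usize::from(estimated_bytes % MAX_RESPONSE_BATCH_SIZE != 0))
    .max(1);
    let rows_per_batch = (num_rows / n_batches).max(1);

    let mut out = Vec::new();
    let mut offset = 0;
    while offset < num_rows {
        let len = rows_per_batch.min(num_rows - offset);
        out.push((offset, len));
        offset += len;
    }
    out
}

fn main() {
    // ~5 MiB spread over 1000 rows needs at least three slices; the ranges must cover every row once.
    let ranges = split_ranges(5 * 1024 * 1024, 1000);
    assert!(ranges.len() >= 3);
    assert_eq!(ranges.iter().map(|(_, len)| len).sum::<usize>(), 1000);
}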
eb80b96a2cc8334fa7d825626774cc135be04b36
|
Trevor Hilton
|
2024-04-15 10:58:36
|
QoL improvements to the load generator and analysis tools (#24914)
|
* feat: add seconds to generated load files
This adds seconds to the time string portion of the generated files from
load generation runs. Previously, if the generator was run more than once
in the same minute, later runs would fail because the results files
already existed.
* refactor: make query/write/system graphs optional based on run
Made the analysis tool have optional graphs based on what was actually
generated.
* refactor: change the time string format in generated load files
| null |
feat: QoL improvements to the load generator and analysis tools (#24914)
* feat: add seconds to generated load files
This adds seconds to the time string portion of the generated files from
load generation runs. Previously, if the generator was run more than once
in the same minute, later runs would fail because the results files
already existed.
* refactor: make query/write/system graphs optional based on run
Made the analysis tool have optional graphs based on what was actually
generated.
* refactor: change the time string format in generated load files
|
diff --git a/influxdb3_load_generator/analysis/app.py b/influxdb3_load_generator/analysis/app.py
index 084162cc6f..ccd4455e3f 100644
--- a/influxdb3_load_generator/analysis/app.py
+++ b/influxdb3_load_generator/analysis/app.py
@@ -30,7 +30,7 @@ def get_config_names():
if os.path.isdir(config_path):
run_times = set()
for file_name in os.listdir(config_path):
- match = re.search(r'_(\d{4}-\d{2}-\d{2}-\d{2}-\d{2})', file_name)
+ match = re.search(r'_(\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2})', file_name)
if match:
run_time = match.group(1)
run_times.add(run_time)
@@ -49,22 +49,29 @@ def get_aggregated_data():
query_file = os.path.join(config_path, f'query_{run_time}.csv')
system_file = os.path.join(config_path, f'system_{run_time}.csv')
- if os.path.isfile(write_file) and os.path.isfile(query_file) and os.path.isfile(system_file):
+ if not os.path.isfile(write_file) and not os.path.isfile(query_file) and not os.path.isfile(system_file):
+ return jsonify({'error': 'Files not found for the specified configuration and run time'})
+
+ write_data = None
+ if os.path.isfile(write_file):
write_data = aggregate_data(write_file, 'lines', 'latency_ms')
+ query_data = None
+ if os.path.isfile(query_file):
query_data = aggregate_data(query_file, 'rows', 'response_ms')
+ system_data = None
+ if os.path.isfile(system_file):
system_data = aggregate_system_data(system_file)
+
- aggregated_data = {
- 'config_name': config_name,
- 'run_time': run_time,
- 'write_data': write_data,
- 'query_data': query_data,
- 'system_data': system_data
- }
+ aggregated_data = {
+ 'config_name': config_name,
+ 'run_time': run_time,
+ 'write_data': write_data,
+ 'query_data': query_data,
+ 'system_data': system_data
+ }
- return jsonify(aggregated_data)
- else:
- return jsonify({'error': 'Files not found for the specified configuration and run time'})
+ return jsonify(aggregated_data)
def aggregate_data(file_path, lines_field, latency_field):
aggregated_data = []
@@ -109,4 +116,4 @@ if __name__ == '__main__':
else:
RESULTS_DIRECTORY = sys.argv[1]
- app.run()
\ No newline at end of file
+ app.run()
diff --git a/influxdb3_load_generator/analysis/templates/index.html b/influxdb3_load_generator/analysis/templates/index.html
index 73064f6b1d..d2bfdce9b7 100644
--- a/influxdb3_load_generator/analysis/templates/index.html
+++ b/influxdb3_load_generator/analysis/templates/index.html
@@ -4,7 +4,7 @@
<title>Benchmark Results Comparison</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
- .graph-container {
+ #graph-container {
display: grid;
grid-template-columns: repeat(2, 1fr);
grid-template-rows: repeat(3, 1fr);
@@ -12,7 +12,7 @@
width: 100%;
height: 600px;
}
- .graph-container canvas {
+ #graph-container canvas {
width: 100% !important;
height: 100% !important;
}
@@ -33,14 +33,7 @@
<select id="config-name-2"></select>
</div>
<button id="compare-btn">Compare</button>
-<div class="graph-container">
- <canvas id="lines-per-second-chart"></canvas>
- <canvas id="write-latency-chart"></canvas>
- <canvas id="queries-per-second-chart"></canvas>
- <canvas id="query-latency-chart"></canvas>
- <canvas id="cpu-usage-chart"></canvas>
- <canvas id="memory-usage-chart"></canvas>
-</div>
+<div id="graph-container"></div>
<script>
const testNameSelect = document.getElementById('test-name');
const configName1Select = document.getElementById('config-name-1');
@@ -111,18 +104,27 @@
const config1Data = data[0];
const config2Data = data[1];
- renderGraph('lines-per-second-chart', 'Lines per Second', config1Data.write_data, config2Data.write_data, 'lines', 10000);
- renderGraph('write-latency-chart', 'Write Latency (ms)', config1Data.write_data, config2Data.write_data, 'latency', 10000, 'median');
- renderGraph('queries-per-second-chart', 'Queries per Second', config1Data.query_data, config2Data.query_data, 'lines', 10000);
- renderGraph('query-latency-chart', 'Query Latency (ms)', config1Data.query_data, config2Data.query_data, 'latency', 10000, 'median');
- renderGraph('cpu-usage-chart', 'CPU Usage (%)', config1Data.system_data, config2Data.system_data, 'cpu_usage');
- renderGraph('memory-usage-chart', 'Memory Usage (MB)', config1Data.system_data, config2Data.system_data, 'memory_bytes');
+ if (config1Data.write_data && config2Data.write_data) {
+ renderGraph('Lines per Second', config1Data.write_data, config2Data.write_data, 'lines', 10000);
+ renderGraph('Write Latency (ms)', config1Data.write_data, config2Data.write_data, 'latency', 10000, 'median');
+ }
+ if (config1Data.query_data && config2Data.query_data) {
+ renderGraph('Queries per Second', config1Data.query_data, config2Data.query_data, 'lines', 10000);
+ renderGraph('Query Latency (ms)', config1Data.query_data, config2Data.query_data, 'latency', 10000, 'median');
+ }
+ if (config1Data.system_data && config2Data.system_data) {
+ renderGraph('CPU Usage (%)', config1Data.system_data, config2Data.system_data, 'cpu_usage');
+ renderGraph('Memory Usage (MB)', config1Data.system_data, config2Data.system_data, 'memory_bytes');
+ }
});
});
// Render a graph using Chart.js
- function renderGraph(chartId, title, config1Data, config2Data, yAxisKey, interval = 10000, aggregateFunction = 'sum') {
- const ctx = document.getElementById(chartId).getContext('2d');
+ function renderGraph(title, config1Data, config2Data, yAxisKey, interval = 10000, aggregateFunction = 'sum') {
+ const container = document.getElementById('graph-container');
+ const canvas = document.createElement("canvas");
+ container.appendChild(canvas);
+ const ctx = canvas.getContext('2d');
const labels = getXLabels(config1Data, interval);
const config1Values = getYValues(config1Data, yAxisKey, interval, aggregateFunction);
@@ -232,4 +234,4 @@
}
</script>
</body>
-</html>
\ No newline at end of file
+</html>
diff --git a/influxdb3_load_generator/src/commands/common.rs b/influxdb3_load_generator/src/commands/common.rs
index 1b12dde733..d768664d75 100644
--- a/influxdb3_load_generator/src/commands/common.rs
+++ b/influxdb3_load_generator/src/commands/common.rs
@@ -369,7 +369,7 @@ impl InfluxDb3Config {
let built_in_specs = crate::specs::built_in_specs();
// sepcify a time string for generated results file names:
- let time_str = format!("{}", Local::now().format("%Y-%m-%d-%H-%M"));
+ let time_str = format!("{}", Local::now().format("%Y-%m-%dT%H-%M-%S"));
// initialize the influxdb3 client:
let client =
|
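A tiny sketch of the file-naming idea from the change above (assumes the `chrono` crate; the helper name is invented): including seconds in the formatted timestamp keeps two runs started within the same minute from targeting the same results file.

use chrono::Local;

/// Build a results file name such as "write_2024-04-15T10-58-36.csv".
fn results_file_name(kind: &str) -> String {
    let time_str = Local::now().format("%Y-%m-%dT%H-%M-%S");
    format!("{kind}_{time_str}.csv")
}

fn main() {
    println!("{}", results_file_name("write"));
}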
465851010211cd463dbc6b1bef2b1299d4b6e321
|
Carol (Nichols || Goulding)
|
2023-01-20 16:47:03
|
For Ingester2, persist a particular namespace on demand and share MiniClusters
|
This should hopefully help keep CI from running out of Postgres
connections.
The old architecture will still need to be non-shared and persist
everything.
| null |
fix: For Ingester2, persist a particular namespace on demand and share MiniClusters
This should hopefully help keep CI from running out of Postgres
connections.
The old architecture will still need to be non-shared and persist
everything.
|
diff --git a/generated_types/protos/influxdata/iox/ingester/v1/write.proto b/generated_types/protos/influxdata/iox/ingester/v1/write.proto
index db545d7a1f..2737124889 100644
--- a/generated_types/protos/influxdata/iox/ingester/v1/write.proto
+++ b/generated_types/protos/influxdata/iox/ingester/v1/write.proto
@@ -18,6 +18,9 @@ service PersistService {
rpc Persist(PersistRequest) returns (PersistResponse);
}
-message PersistRequest {}
+message PersistRequest {
+ // The namespace to persist
+ string namespace = 1;
+}
message PersistResponse {}
diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs
index 2280561945..fb145b97f0 100644
--- a/influxdb_iox/tests/end_to_end_cases/querier.rs
+++ b/influxdb_iox/tests/end_to_end_cases/querier.rs
@@ -903,16 +903,7 @@ mod kafkaless_rpc_write {
let table_name = "the_table";
// Set up the cluster ====================================
- let ingester_config = TestConfig::new_ingester2_never_persist(&database_url);
- let router_config = TestConfig::new_router2(&ingester_config);
- let querier_config = TestConfig::new_querier2(&ingester_config);
- let mut cluster = MiniCluster::new()
- .with_ingester(ingester_config)
- .await
- .with_router(router_config)
- .await
- .with_querier(querier_config)
- .await;
+ let mut cluster = MiniCluster::create_shared2_never_persist(database_url).await;
StepTest::new(
&mut cluster,
@@ -951,16 +942,7 @@ mod kafkaless_rpc_write {
let table_name = "the_table";
// Set up the cluster ====================================
- let ingester_config = TestConfig::new_ingester2_never_persist(&database_url);
- let router_config = TestConfig::new_router2(&ingester_config);
- let querier_config = TestConfig::new_querier2(&ingester_config);
- let mut cluster = MiniCluster::new()
- .with_ingester(ingester_config)
- .await
- .with_router(router_config)
- .await
- .with_querier(querier_config)
- .await;
+ let mut cluster = MiniCluster::create_shared2_never_persist(database_url).await;
StepTest::new(
&mut cluster,
diff --git a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc.rs b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc.rs
index 8facdb19bb..d8e9156e4f 100644
--- a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc.rs
+++ b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc.rs
@@ -98,7 +98,7 @@ trait InfluxRpcTest: Send + Sync + 'static {
.await
}
IoxArchitecture::Kafkaless => {
- MiniCluster::create_non_shared2_never_persist(database_url.clone()).await
+ MiniCluster::create_shared2_never_persist(database_url.clone()).await
}
};
diff --git a/influxdb_iox/tests/query_tests2/framework.rs b/influxdb_iox/tests/query_tests2/framework.rs
index 67e566e55d..da5f9adc73 100644
--- a/influxdb_iox/tests/query_tests2/framework.rs
+++ b/influxdb_iox/tests/query_tests2/framework.rs
@@ -66,10 +66,10 @@ impl TestCase {
for chunk_stage in self.chunk_stage {
info!("Using IoxArchitecture::{arch:?} and ChunkStage::{chunk_stage:?}");
- // Setup that differs by architecture and chunk stage. These need to be non-shared
- // clusters; if they're shared, then the tests that run in parallel and persist at
- // particular times mess with each other because persistence applies to everything in
- // the ingester.
+ // Setup that differs by architecture and chunk stage. In the Kafka architecture,
+ // these need to be non-shared clusters; if they're shared, then the tests that run
+ // in parallel and persist at particular times mess with each other because
+ // persistence applies to everything in the ingester.
let mut cluster = match (arch, chunk_stage) {
(IoxArchitecture::Kafkaful, ChunkStage::Ingester) => {
MiniCluster::create_non_shared_standard_never_persist(database_url.clone())
@@ -79,10 +79,10 @@ impl TestCase {
MiniCluster::create_non_shared_standard(database_url.clone()).await
}
(IoxArchitecture::Kafkaless, ChunkStage::Ingester) => {
- MiniCluster::create_non_shared2_never_persist(database_url.clone()).await
+ MiniCluster::create_shared2_never_persist(database_url.clone()).await
}
(IoxArchitecture::Kafkaless, ChunkStage::Parquet) => {
- MiniCluster::create_non_shared2(database_url.clone()).await
+ MiniCluster::create_shared2(database_url.clone()).await
}
(_, ChunkStage::All) => unreachable!("See `impl IntoIterator for ChunkStage`"),
};
diff --git a/influxdb_iox_client/src/client/ingester.rs b/influxdb_iox_client/src/client/ingester.rs
index 3c550538a9..0d33f5bac0 100644
--- a/influxdb_iox_client/src/client/ingester.rs
+++ b/influxdb_iox_client/src/client/ingester.rs
@@ -21,11 +21,11 @@ impl Client {
}
}
- /// Instruct the ingester to persist its data to Parquet. Will block until the data has
- /// persisted, which is useful in tests asserting on persisted data. May behave in unexpected
- /// ways if used concurrently with writes and ingester WAL rotations.
- pub async fn persist(&mut self) -> Result<(), Error> {
- self.inner.persist(PersistRequest {}).await?;
+ /// Instruct the ingester to persist its data for the specified namespace to Parquet. Useful in
+ /// tests asserting on persisted data. May behave in unexpected ways if used concurrently with
+ /// writes and ingester WAL rotations.
+ pub async fn persist(&mut self, namespace: String) -> Result<(), Error> {
+ self.inner.persist(PersistRequest { namespace }).await?;
Ok(())
}
diff --git a/ingester/src/server/grpc/persist.rs b/ingester/src/server/grpc/persist.rs
index 9eaee5850b..bccfa52e31 100644
--- a/ingester/src/server/grpc/persist.rs
+++ b/ingester/src/server/grpc/persist.rs
@@ -25,6 +25,9 @@ impl<I: IngestHandler + 'static> PersistService for PersistHandler<I> {
&self,
_request: Request<proto::PersistRequest>,
) -> Result<Response<proto::PersistResponse>, tonic::Status> {
+ // Even though the request specifies the namespace, persist everything. This means tests
+ // that use this API need to be using non-shared MiniClusters in order to avoid messing
+ // with each others' states.
self.ingest_handler.persist_all().await;
Ok(Response::new(proto::PersistResponse {}))
diff --git a/ingester2/src/server/grpc.rs b/ingester2/src/server/grpc.rs
index 400392c551..69f0d58158 100644
--- a/ingester2/src/server/grpc.rs
+++ b/ingester2/src/server/grpc.rs
@@ -120,6 +120,7 @@ where
PersistServiceServer::new(PersistHandler::new(
Arc::clone(&self.buffer),
Arc::clone(&self.persist_handle),
+ Arc::clone(&self.catalog),
))
}
diff --git a/ingester2/src/server/grpc/persist.rs b/ingester2/src/server/grpc/persist.rs
index a9586dd437..2431132fa9 100644
--- a/ingester2/src/server/grpc/persist.rs
+++ b/ingester2/src/server/grpc/persist.rs
@@ -5,12 +5,15 @@ use crate::{
use generated_types::influxdata::iox::ingester::v1::{
self as proto, persist_service_server::PersistService,
};
+use iox_catalog::interface::Catalog;
+use std::sync::Arc;
use tonic::{Request, Response};
#[derive(Debug)]
pub(crate) struct PersistHandler<T, P> {
buffer: T,
persist_handle: P,
+ catalog: Arc<dyn Catalog>,
}
impl<T, P> PersistHandler<T, P>
@@ -18,10 +21,11 @@ where
T: PartitionIter + Sync + 'static,
P: PersistQueue + Clone + Sync + 'static,
{
- pub(crate) fn new(buffer: T, persist_handle: P) -> Self {
+ pub(crate) fn new(buffer: T, persist_handle: P, catalog: Arc<dyn Catalog>) -> Self {
Self {
buffer,
persist_handle,
+ catalog,
}
}
}
@@ -37,9 +41,27 @@ where
/// concurrently with writes and ingester WAL rotations.
async fn persist(
&self,
- _request: Request<proto::PersistRequest>,
+ request: Request<proto::PersistRequest>,
) -> Result<Response<proto::PersistResponse>, tonic::Status> {
- persist_partitions(self.buffer.partition_iter(), &self.persist_handle).await;
+ let request = request.into_inner();
+
+ let namespace = self
+ .catalog
+ .repositories()
+ .await
+ .namespaces()
+ .get_by_name(&request.namespace)
+ .await
+ .map_err(|e| tonic::Status::internal(e.to_string()))?
+ .ok_or_else(|| tonic::Status::not_found(&request.namespace))?;
+
+ persist_partitions(
+ self.buffer
+ .partition_iter()
+ .filter(|p| p.lock().namespace_id() == namespace.id),
+ &self.persist_handle,
+ )
+ .await;
Ok(Response::new(proto::PersistResponse {}))
}
diff --git a/test_helpers_end_to_end/src/mini_cluster.rs b/test_helpers_end_to_end/src/mini_cluster.rs
index b14feec9d4..b4c8b87fb0 100644
--- a/test_helpers_end_to_end/src/mini_cluster.rs
+++ b/test_helpers_end_to_end/src/mini_cluster.rs
@@ -494,7 +494,10 @@ impl MiniCluster {
let mut ingester_client =
influxdb_iox_client::ingester::Client::new(self.ingester().ingester_grpc_connection());
- ingester_client.persist().await.unwrap();
+ ingester_client
+ .persist(self.namespace().into())
+ .await
+ .unwrap();
}
/// Get a reference to the mini cluster's other servers.
|
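Stand-alone sketch of the selection step the new ingester2 handler performs (toy types only, not the real `PartitionIter`/`PersistQueue` machinery): resolve the namespace named in the request, then persist only the buffered partitions whose namespace id matches.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct NamespaceId(i64);

#[derive(Debug, PartialEq)]
struct Partition {
    namespace_id: NamespaceId,
    name: &'static str,
}

/// Keep only partitions that belong to the requested namespace.
fn partitions_to_persist(all: Vec<Partition>, target: NamespaceId) -> Vec<Partition> {
    all.into_iter().filter(|p| p.namespace_id == target).collect()
}

fn main() {
    let buffered = vec![
        Partition { namespace_id: NamespaceId(1), name: "ns1-p1" },
        Partition { namespace_id: NamespaceId(2), name: "ns2-p1" },
    ];
    let to_persist = partitions_to_persist(buffered, NamespaceId(1));
    assert_eq!(to_persist.len(), 1);
    assert_eq!(to_persist[0].name, "ns1-p1");
}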
2217799256ff7f0fd22c2113a6bb072f4d85a6aa
|
Andrew Lamb
|
2023-05-01 07:15:13
|
Add --partition_filter to `remote get table` command (#7685)
|
* feat: Add --partition_filter to `remote get table` command
* fix: update command description
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: Add --partition_filter to `remote get table` command (#7685)
* feat: Add --partition_filter to `remote get table` command
* fix: update command description
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/influxdb_iox/src/commands/remote/partition.rs b/influxdb_iox/src/commands/remote/partition.rs
index c40f22b30f..6d36adab17 100644
--- a/influxdb_iox/src/commands/remote/partition.rs
+++ b/influxdb_iox/src/commands/remote/partition.rs
@@ -74,7 +74,7 @@ pub struct Config {
/// Show the parqet_files of a partition
#[derive(Debug, clap::Parser)]
struct Show {
- /// The id of the partition
+ /// The id of the partition. If not specified, all parquet files are shown
#[clap(action)]
id: i64,
}
diff --git a/influxdb_iox/src/commands/remote/store.rs b/influxdb_iox/src/commands/remote/store.rs
index ba41b645fb..197db8a482 100644
--- a/influxdb_iox/src/commands/remote/store.rs
+++ b/influxdb_iox/src/commands/remote/store.rs
@@ -2,7 +2,11 @@
use futures::StreamExt;
use futures_util::TryStreamExt;
-use influxdb_iox_client::{catalog, connection::Connection, store};
+use influxdb_iox_client::{
+ catalog::{self, generated_types::ParquetFile},
+ connection::Connection,
+ store,
+};
use std::path::PathBuf;
use thiserror::Error;
use tokio::{
@@ -54,6 +58,10 @@ struct GetTable {
#[clap(action)]
table: String,
+ /// If specified, only files from the specified partitions are downloaded
+ #[clap(action, short, long)]
+ partition_id: Option<i64>,
+
/// The output directory to use. If not specified, files will be placed in a directory named
/// after the table in the current working directory.
#[clap(action, short)]
@@ -83,31 +91,36 @@ pub async fn command(connection: Connection, config: Config) -> Result<(), Error
Ok(())
}
- Command::GetTable(get_table) => {
- let directory = get_table
- .output_directory
- .unwrap_or_else(|| PathBuf::from(&get_table.table));
+ Command::GetTable(GetTable {
+ namespace,
+ table,
+ partition_id,
+ output_directory,
+ }) => {
+ let directory = output_directory.unwrap_or_else(|| PathBuf::from(&table));
fs::create_dir_all(&directory).await?;
let mut catalog_client = catalog::Client::new(connection.clone());
let mut store_client = store::Client::new(connection);
let parquet_files = catalog_client
- .get_parquet_files_by_namespace_table(
- get_table.namespace.clone(),
- get_table.table.clone(),
- )
+ .get_parquet_files_by_namespace_table(namespace.clone(), table.clone())
.await?;
+
let num_parquet_files = parquet_files.len();
println!("found {num_parquet_files} Parquet files, downloading...");
let indexed_parquet_file_metadata = parquet_files.into_iter().enumerate();
+ if let Some(partition_id) = partition_id {
+ println!("Filtering by partition {partition_id}");
+ }
+
for (index, parquet_file) in indexed_parquet_file_metadata {
- let uuid = parquet_file.object_store_id;
- let partition_id = parquet_file.partition_id;
+ let uuid = &parquet_file.object_store_id;
+ let file_partition_id = parquet_file.partition_id;
let file_size_bytes = parquet_file.file_size_bytes as u64;
let index = index + 1;
- let filename = format!("{uuid}.{partition_id}.parquet");
+ let filename = format!("{uuid}.{file_partition_id}.parquet");
let file_path = directory.join(&filename);
if fs::metadata(&file_path)
@@ -117,6 +130,10 @@ pub async fn command(connection: Connection, config: Config) -> Result<(), Error
println!(
"skipping file {index} of {num_parquet_files} ({filename} already exists)"
);
+ } else if !download_partition(&parquet_file, partition_id) {
+ println!(
+ "skipping file {index} of {num_parquet_files} ({file_partition_id} does not match request)"
+ );
} else {
println!("downloading file {index} of {num_parquet_files} ({filename})...");
let mut response = store_client
@@ -137,3 +154,15 @@ pub async fn command(connection: Connection, config: Config) -> Result<(), Error
}
}
}
+
+/// evaluate the partition_filter on this file
+fn download_partition(parquet_file: &ParquetFile, partition_id: Option<i64>) -> bool {
+ partition_id
+ .map(|partition_id| {
+ // if a partition_id was specified, only download the file if
+ // the partition matches
+ parquet_file.partition_id == partition_id
+ })
+ // download files if there is no partition
+ .unwrap_or(true)
+}
|
a3fa986f6e47d61c02ba0a67137ea0ea038e7b6a
|
Carol (Nichols || Goulding)
|
2023-03-17 10:48:24
|
Give post-classification partition filter a separate trait
|
I want to change the types, but for now, this is mostly a copy of the
partition filter trait and setting up the associated
logging/metrics/mock.
| null |
refactor: Give post-classification partition filter a separate trait
I want to change the types, but for now, this is mostly a copy of the
partition filter trait and setting up the associated
logging/metrics/mock.
|
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index 54c0d8c806..36cc60e620 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -57,7 +57,7 @@ use super::{
has_files::HasFilesPartitionFilter, has_matching_file::HasMatchingFilePartitionFilter,
logging::LoggingPartitionFilterWrapper, max_num_columns::MaxNumColumnsPartitionFilter,
metrics::MetricsPartitionFilterWrapper, never_skipped::NeverSkippedPartitionFilter,
- or::OrPartitionFilter, possible_progress::PossibleProgressFilter, PartitionFilter,
+ or::OrPartitionFilter, PartitionFilter,
},
partition_info_source::sub_sources::SubSourcePartitionInfoSource,
partition_source::{
@@ -75,6 +75,10 @@ use super::{
not_empty::NotEmptyPartitionsSourceWrapper,
randomize_order::RandomizeOrderPartitionsSourcesWrapper, PartitionsSource,
},
+ post_classification_partition_filter::{
+ logging::LoggingPostClassificationFilterWrapper,
+ metrics::MetricsPostClassificationFilterWrapper, possible_progress::PossibleProgressFilter,
+ },
round_info_source::{LevelBasedRoundInfo, LoggingRoundInfoWrapper},
round_split::many_files::ManyFilesRoundSplit,
scratchpad::{noop::NoopScratchpadGen, prod::ProdScratchpadGen, ScratchpadGen},
@@ -324,14 +328,16 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
)),
),
))),
- post_classification_partition_filter: Arc::new(LoggingPartitionFilterWrapper::new(
- MetricsPartitionFilterWrapper::new(
- PossibleProgressFilter::new(config.max_compact_size_bytes()),
- &config.metric_registry,
+ post_classification_partition_filter: Arc::new(
+ LoggingPostClassificationFilterWrapper::new(
+ MetricsPostClassificationFilterWrapper::new(
+ PossibleProgressFilter::new(config.max_compact_size_bytes()),
+ &config.metric_registry,
+ partition_resource_limit_conditions,
+ ),
partition_resource_limit_conditions,
),
- partition_resource_limit_conditions,
- )),
+ ),
})
}
diff --git a/compactor2/src/components/mod.rs b/compactor2/src/components/mod.rs
index 3a892be67f..963ee5f347 100644
--- a/compactor2/src/components/mod.rs
+++ b/compactor2/src/components/mod.rs
@@ -6,6 +6,7 @@ use self::{
parquet_files_sink::ParquetFilesSink, partition_done_sink::PartitionDoneSink,
partition_files_source::PartitionFilesSource, partition_filter::PartitionFilter,
partition_info_source::PartitionInfoSource, partition_stream::PartitionStream,
+ post_classification_partition_filter::PostClassificationPartitionFilter,
round_info_source::RoundInfoSource, round_split::RoundSplit, scratchpad::ScratchpadGen,
};
@@ -30,6 +31,7 @@ pub mod partition_info_source;
pub mod partition_source;
pub mod partition_stream;
pub mod partitions_source;
+pub mod post_classification_partition_filter;
pub mod report;
pub mod round_info_source;
pub mod round_split;
@@ -55,7 +57,7 @@ pub struct Components {
/// stop condition for completing a partition compaction
pub partition_filter: Arc<dyn PartitionFilter>,
/// condition to avoid running out of resources during compaction
- pub post_classification_partition_filter: Arc<dyn PartitionFilter>,
+ pub post_classification_partition_filter: Arc<dyn PostClassificationPartitionFilter>,
/// Records "partition is done" status for given partition.
pub partition_done_sink: Arc<dyn PartitionDoneSink>,
/// Commits changes (i.e. deletion and creation) to the catalog.
diff --git a/compactor2/src/components/partition_filter/mod.rs b/compactor2/src/components/partition_filter/mod.rs
index adfe96b002..faf6fe4704 100644
--- a/compactor2/src/components/partition_filter/mod.rs
+++ b/compactor2/src/components/partition_filter/mod.rs
@@ -15,7 +15,6 @@ pub mod max_num_columns;
pub mod metrics;
pub mod never_skipped;
pub mod or;
-pub mod possible_progress;
/// Filters partition based on ID and parquet files.
///
diff --git a/compactor2/src/components/post_classification_partition_filter/logging.rs b/compactor2/src/components/post_classification_partition_filter/logging.rs
new file mode 100644
index 0000000000..39fe0c661f
--- /dev/null
+++ b/compactor2/src/components/post_classification_partition_filter/logging.rs
@@ -0,0 +1,141 @@
+use std::fmt::Display;
+
+use async_trait::async_trait;
+use data_types::ParquetFile;
+use observability_deps::tracing::{debug, error, info};
+
+use crate::{error::DynError, PartitionInfo};
+
+use super::PostClassificationPartitionFilter;
+
+#[derive(Debug)]
+pub struct LoggingPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ inner: T,
+ filter_type: &'static str,
+}
+
+impl<T> LoggingPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ pub fn new(inner: T, filter_type: &'static str) -> Self {
+ Self { inner, filter_type }
+ }
+}
+
+impl<T> Display for LoggingPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "logging({}, {})", self.inner, self.filter_type)
+ }
+}
+
+#[async_trait]
+impl<T> PostClassificationPartitionFilter for LoggingPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ async fn apply(
+ &self,
+ partition_info: &PartitionInfo,
+ files: &[ParquetFile],
+ ) -> Result<bool, DynError> {
+ let res = self.inner.apply(partition_info, files).await;
+ match &res {
+ Ok(true) => {
+ debug!(
+ partition_id = partition_info.partition_id.get(),
+ filter_type = self.filter_type,
+ "NOT filtered partition"
+ );
+ }
+ Ok(false) => {
+ info!(
+ partition_id = partition_info.partition_id.get(),
+ filter_type = self.filter_type,
+ "filtered partition"
+ );
+ }
+ Err(e) => {
+ error!(
+ partition_id = partition_info.partition_id.get(),
+ filter_type = self.filter_type,
+ %e,
+ "error filtering filtered partition"
+ );
+ }
+ }
+ res
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use test_helpers::tracing::TracingCapture;
+
+ use crate::{
+ components::post_classification_partition_filter::mock::MockPostClassificationPartitionFilter,
+ test_utils::PartitionInfoBuilder,
+ };
+ use iox_tests::ParquetFileBuilder;
+
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ let filter = LoggingPostClassificationFilterWrapper::new(
+ MockPostClassificationPartitionFilter::new(vec![Ok(true)]),
+ "test",
+ );
+ assert_eq!(filter.to_string(), "logging(mock, test)");
+ }
+
+ #[tokio::test]
+ async fn test_apply() {
+ let filter = LoggingPostClassificationFilterWrapper::new(
+ MockPostClassificationPartitionFilter::new(vec![
+ Ok(true),
+ Ok(false),
+ Err("problem".into()),
+ ]),
+ "test",
+ );
+ let f = ParquetFileBuilder::new(0).build();
+ let p_info1 = Arc::new(PartitionInfoBuilder::new().with_partition_id(1).build());
+ let p_info2 = Arc::new(PartitionInfoBuilder::new().with_partition_id(2).build());
+ let p_info3 = Arc::new(PartitionInfoBuilder::new().with_partition_id(3).build());
+
+ let capture = TracingCapture::new();
+
+ assert!(filter.apply(&p_info1, &[]).await.unwrap());
+ assert!(!filter.apply(&p_info2, &[f]).await.unwrap());
+ assert_eq!(
+ filter.apply(&p_info3, &[]).await.unwrap_err().to_string(),
+ "problem"
+ );
+
+ assert_eq!(
+ capture.to_string(),
+ "level = DEBUG; \
+ message = NOT filtered partition; \
+ partition_id = 1; \
+ filter_type = \"test\"; \n\
+ level = INFO; \
+ message = filtered partition; \
+ partition_id = 2; \
+ filter_type = \"test\"; \n\
+ level = ERROR; \
+ message = error filtering filtered partition; \
+ partition_id = 3; \
+ filter_type = \"test\"; \
+ e = problem; ",
+ );
+ }
+}
diff --git a/compactor2/src/components/post_classification_partition_filter/metrics.rs b/compactor2/src/components/post_classification_partition_filter/metrics.rs
new file mode 100644
index 0000000000..b296137a7a
--- /dev/null
+++ b/compactor2/src/components/post_classification_partition_filter/metrics.rs
@@ -0,0 +1,170 @@
+use std::fmt::Display;
+
+use async_trait::async_trait;
+use data_types::ParquetFile;
+use metric::{Registry, U64Counter};
+
+use crate::{error::DynError, PartitionInfo};
+
+use super::PostClassificationPartitionFilter;
+
+const METRIC_NAME_PARTITION_FILTER_COUNT: &str =
+ "iox_compactor_post_classification_partition_filter_count";
+
+#[derive(Debug)]
+pub struct MetricsPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ pass_counter: U64Counter,
+ filter_counter: U64Counter,
+ error_counter: U64Counter,
+ inner: T,
+ filter_type: &'static str,
+}
+
+impl<T> MetricsPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ pub fn new(inner: T, registry: &Registry, filter_type: &'static str) -> Self {
+ let metric = registry.register_metric::<U64Counter>(
+ METRIC_NAME_PARTITION_FILTER_COUNT,
+ "Number of times the compactor filtered partitions after its files were classified",
+ );
+
+ let pass_counter = metric.recorder(&[("result", "pass"), ("filter_type", filter_type)]);
+ let filter_counter = metric.recorder(&[("result", "filter"), ("filter_type", filter_type)]);
+ let error_counter = metric.recorder(&[("result", "error"), ("filter_type", filter_type)]);
+
+ Self {
+ pass_counter,
+ filter_counter,
+ error_counter,
+ inner,
+ filter_type,
+ }
+ }
+}
+
+impl<T> Display for MetricsPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "metrics({}, {})", self.inner, self.filter_type)
+ }
+}
+
+#[async_trait]
+impl<T> PostClassificationPartitionFilter for MetricsPostClassificationFilterWrapper<T>
+where
+ T: PostClassificationPartitionFilter,
+{
+ async fn apply(
+ &self,
+ partition_info: &PartitionInfo,
+ files: &[ParquetFile],
+ ) -> Result<bool, DynError> {
+ let res = self.inner.apply(partition_info, files).await;
+ match res {
+ Ok(true) => {
+ self.pass_counter.inc(1);
+ }
+ Ok(false) => {
+ self.filter_counter.inc(1);
+ }
+ Err(_) => {
+ self.error_counter.inc(1);
+ }
+ }
+ res
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use metric::{assert_counter, Attributes};
+
+ use crate::{
+ components::post_classification_partition_filter::mock::MockPostClassificationPartitionFilter,
+ test_utils::PartitionInfoBuilder,
+ };
+ use iox_tests::ParquetFileBuilder;
+
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ let registry = Registry::new();
+ let filter = MetricsPostClassificationFilterWrapper::new(
+ MockPostClassificationPartitionFilter::new(vec![Ok(true)]),
+ ®istry,
+ "test",
+ );
+ assert_eq!(filter.to_string(), "metrics(mock, test)",);
+ }
+
+ #[tokio::test]
+ async fn test_apply() {
+ let registry = Registry::new();
+ let filter = MetricsPostClassificationFilterWrapper::new(
+ MockPostClassificationPartitionFilter::new(vec![
+ Ok(true),
+ Ok(false),
+ Err("problem".into()),
+ ]),
+ ®istry,
+ "test",
+ );
+ let p_info = Arc::new(PartitionInfoBuilder::new().with_partition_id(1).build());
+ let f = ParquetFileBuilder::new(0).build();
+
+ assert_pass_counter(®istry, 0);
+ assert_filter_counter(®istry, 0);
+ assert_error_counter(®istry, 0);
+
+ assert!(filter.apply(&p_info, &[]).await.unwrap());
+ assert!(!filter.apply(&p_info, &[f]).await.unwrap());
+ assert_eq!(
+ filter.apply(&p_info, &[]).await.unwrap_err().to_string(),
+ "problem"
+ );
+
+ assert_pass_counter(®istry, 1);
+ assert_filter_counter(®istry, 1);
+ assert_error_counter(®istry, 1);
+ }
+
+ fn assert_pass_counter(registry: &Registry, value: u64) {
+ assert_counter!(
+ registry,
+ U64Counter,
+ METRIC_NAME_PARTITION_FILTER_COUNT,
+ labels = Attributes::from(&[("result", "pass"), ("filter_type", "test")]),
+ value = value,
+ );
+ }
+
+ fn assert_filter_counter(registry: &Registry, value: u64) {
+ assert_counter!(
+ registry,
+ U64Counter,
+ METRIC_NAME_PARTITION_FILTER_COUNT,
+ labels = Attributes::from(&[("result", "filter"), ("filter_type", "test")]),
+ value = value,
+ );
+ }
+
+ fn assert_error_counter(registry: &Registry, value: u64) {
+ assert_counter!(
+ registry,
+ U64Counter,
+ METRIC_NAME_PARTITION_FILTER_COUNT,
+ labels = Attributes::from(&[("result", "error"), ("filter_type", "test")]),
+ value = value,
+ );
+ }
+}
diff --git a/compactor2/src/components/post_classification_partition_filter/mock.rs b/compactor2/src/components/post_classification_partition_filter/mock.rs
new file mode 100644
index 0000000000..22110e22fe
--- /dev/null
+++ b/compactor2/src/components/post_classification_partition_filter/mock.rs
@@ -0,0 +1,61 @@
+use std::{
+ fmt::{Debug, Display},
+ sync::Mutex,
+};
+
+use async_trait::async_trait;
+use data_types::ParquetFile;
+
+use crate::{error::DynError, PartitionInfo};
+
+use super::PostClassificationPartitionFilter;
+
+pub struct MockPostClassificationPartitionFilter {
+ return_values: Mutex<Box<dyn Iterator<Item = Result<bool, DynError>> + Send>>,
+}
+
+impl MockPostClassificationPartitionFilter {
+ #[cfg(test)]
+ pub fn new(return_values: Vec<Result<bool, DynError>>) -> Self {
+ Self {
+ return_values: Mutex::new(Box::new(return_values.into_iter())),
+ }
+ }
+}
+
+impl Display for MockPostClassificationPartitionFilter {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "mock")
+ }
+}
+
+impl Debug for MockPostClassificationPartitionFilter {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "mock")
+ }
+}
+
+#[async_trait]
+impl PostClassificationPartitionFilter for MockPostClassificationPartitionFilter {
+ async fn apply(
+ &self,
+ _partition_info: &PartitionInfo,
+ _files: &[ParquetFile],
+ ) -> Result<bool, DynError> {
+ self.return_values.lock().unwrap().next().unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ assert_eq!(
+ MockPostClassificationPartitionFilter::new(vec![Ok(true), Err("problem".into())])
+ .to_string(),
+ "mock"
+ );
+ }
+}
diff --git a/compactor2/src/components/post_classification_partition_filter/mod.rs b/compactor2/src/components/post_classification_partition_filter/mod.rs
new file mode 100644
index 0000000000..638d91c2ee
--- /dev/null
+++ b/compactor2/src/components/post_classification_partition_filter/mod.rs
@@ -0,0 +1,26 @@
+use std::fmt::{Debug, Display};
+
+use async_trait::async_trait;
+use data_types::ParquetFile;
+
+use crate::{error::DynError, PartitionInfo};
+
+pub mod logging;
+pub mod metrics;
+pub mod mock;
+pub mod possible_progress;
+
+/// Filters partition based on ID and parquet files after the files have been classified.
+///
+/// May return an error. In this case, the partition will be marked as "skipped".
+#[async_trait]
+pub trait PostClassificationPartitionFilter: Debug + Display + Send + Sync {
+    /// Return `true` if the compactor should run a
+ /// compaction on this partition. Return `false` if this partition
+ /// does not need any more compaction.
+ async fn apply(
+ &self,
+ partition_info: &PartitionInfo,
+ files: &[ParquetFile],
+ ) -> Result<bool, DynError>;
+}
diff --git a/compactor2/src/components/partition_filter/possible_progress.rs b/compactor2/src/components/post_classification_partition_filter/possible_progress.rs
similarity index 95%
rename from compactor2/src/components/partition_filter/possible_progress.rs
rename to compactor2/src/components/post_classification_partition_filter/possible_progress.rs
index ff08fc2f18..eac788db28 100644
--- a/compactor2/src/components/partition_filter/possible_progress.rs
+++ b/compactor2/src/components/post_classification_partition_filter/possible_progress.rs
@@ -8,7 +8,7 @@ use crate::{
PartitionInfo,
};
-use super::PartitionFilter;
+use super::PostClassificationPartitionFilter;
#[derive(Debug)]
pub struct PossibleProgressFilter {
@@ -28,7 +28,7 @@ impl Display for PossibleProgressFilter {
}
#[async_trait]
-impl PartitionFilter for PossibleProgressFilter {
+impl PostClassificationPartitionFilter for PossibleProgressFilter {
async fn apply(
&self,
partition_info: &PartitionInfo,
|
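The diff above wraps the post-classification filter in logging and metrics decorators. A minimal sketch of that decorator pattern follows; the names (`Filter`, `MaxFiles`, `Logging`) are illustrative stand-ins, not the compactor's real types, and the real wrappers are async and emit structured logs and metrics rather than `println!`.

```rust
// Sketch of the decorator pattern used by the wrappers above: the outer type
// implements the same trait as the value it wraps and observes each call.
use std::fmt::Display;

trait Filter: Display {
    fn apply(&self, file_count: usize) -> Result<bool, String>;
}

// A trivial inner filter (hypothetical).
struct MaxFiles(usize);

impl Display for MaxFiles {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "max_files({})", self.0)
    }
}

impl Filter for MaxFiles {
    fn apply(&self, file_count: usize) -> Result<bool, String> {
        Ok(file_count <= self.0)
    }
}

// The logging decorator: same trait, extra observation around the inner call.
struct Logging<T: Filter> {
    inner: T,
}

impl<T: Filter> Display for Logging<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "logging({})", self.inner)
    }
}

impl<T: Filter> Filter for Logging<T> {
    fn apply(&self, file_count: usize) -> Result<bool, String> {
        let res = self.inner.apply(file_count);
        match &res {
            Ok(true) => println!("{}: NOT filtered", self),
            Ok(false) => println!("{}: filtered", self),
            Err(e) => println!("{}: error {}", self, e),
        }
        res
    }
}

fn main() {
    let filter = Logging { inner: MaxFiles(10) };
    assert!(filter.apply(3).unwrap());
    assert!(!filter.apply(100).unwrap());
}
```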
f5e7c65fef4d73a1f6e76e55fdb3cc29d0743569
|
Dom Dwyer
|
2023-02-17 23:55:45
|
instrument ingester query response
|
Adds a QueryExec decorator that transparently injects instrumentation
into an Ingester query response stream.
This captures the wall-clock duration of time a query stream has taken
to be read to completion (or aborted) by the caller, faceted by
stream completed / dropped and batch error / all OK.
Also records the distribution of row, record batch, and partition count
per query to quantify the amount of data being read per query.
| null |
feat(metrics): instrument ingester query response
Adds a QueryExec decorator that transparently injects instrumentation
into an Ingester query response stream.
This captures the wall-clock duration of time a query stream has taken
to be read to completion (or aborted) by the caller, faceted by
stream completed / dropped and batch error / all OK.
Also records the distribution of row, record batch, and partition count
per query to quantify the amount of data being read per query.
|
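Before the recorded diff, a minimal, self-contained sketch of the pattern it implements: wrap the result stream, count what the caller actually consumes, and record the elapsed wall-clock time when the wrapper is dropped. The types below are hypothetical stand-ins (a plain `Iterator` and `println!` instead of the ingester's async streams and metric histograms).

```rust
// Sketch: count consumed rows/errors and record duration at drop time.
use std::time::Instant;

struct InstrumentedResults<I> {
    inner: I,
    rows: usize,
    saw_error: bool,
    started_at: Instant,
}

impl<I> InstrumentedResults<I> {
    fn new(inner: I) -> Self {
        Self {
            inner,
            rows: 0,
            saw_error: false,
            started_at: Instant::now(),
        }
    }
}

impl<I: Iterator<Item = Result<usize, String>>> Iterator for InstrumentedResults<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        let item = self.inner.next();
        match &item {
            Some(Ok(n)) => self.rows += *n,        // rows in this batch
            Some(Err(_)) => self.saw_error = true, // at least one error observed
            None => {}                             // stream exhausted
        }
        item
    }
}

impl<I> Drop for InstrumentedResults<I> {
    fn drop(&mut self) {
        // The real decorator feeds duration and count histograms here,
        // faceted by completed/aborted and error/ok.
        println!(
            "rows={} saw_error={} elapsed={:?}",
            self.rows,
            self.saw_error,
            self.started_at.elapsed()
        );
    }
}

fn main() {
    let batches: Vec<Result<usize, String>> = vec![Ok(5), Ok(3), Err("boom".into())];
    let mut results = InstrumentedResults::new(batches.into_iter());
    while let Some(_batch) = results.next() {}
} // metrics are emitted when `results` is dropped here
```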
diff --git a/ingester2/src/query/mod.rs b/ingester2/src/query/mod.rs
index 34262518cd..cb3f79b9ec 100644
--- a/ingester2/src/query/mod.rs
+++ b/ingester2/src/query/mod.rs
@@ -9,6 +9,7 @@ pub(crate) mod response;
// Instrumentation
pub(crate) mod exec_instrumentation;
+pub(crate) mod result_instrumentation;
pub(crate) mod tracing;
#[cfg(test)]
diff --git a/ingester2/src/query/result_instrumentation.rs b/ingester2/src/query/result_instrumentation.rs
new file mode 100644
index 0000000000..fba3ad98f4
--- /dev/null
+++ b/ingester2/src/query/result_instrumentation.rs
@@ -0,0 +1,1147 @@
+//! Instrumentation of query results streamed from a [`QueryExec`]
+//! implementation.
+//!
+//! The top-level [`QueryResultInstrumentation`] decorator implements the
+//! [`QueryExec`] trait, wrapping the response of the inner implementation with
+//! instrumentation.
+//!
+//! ```text
+//!   QueryResultInstrumentation
+//!     (wraps the inner QueryExec)
+//!                 |
+//!                 |  injects
+//!                 v
+//!   QueryMetricContext ---- observes ----> QueryResponse
+//!                 |
+//!                 |  injects, one per partition
+//!                 v
+//!   BatchStreamRecorder ---- observes ----> PartitionResponse's
+//!                                           RecordBatchStream
+//! ```
+//!
+//! The [`QueryMetricContext`] is injected into the [`QueryResponse`], recording
+//! the lifetime of the [`QueryResponse`] itself, and further injecting
+//! instances of [`BatchStreamRecorder`] into each [`PartitionResponse`] to
+//! observe the per-partition stream of [`RecordBatch`] that are yielded from
+//! it.
+
+use std::{
+ pin::Pin,
+ sync::{
+ atomic::{AtomicBool, AtomicUsize, Ordering},
+ Arc,
+ },
+ task::{Context, Poll},
+};
+
+use arrow::record_batch::RecordBatch;
+use async_trait::async_trait;
+use data_types::{NamespaceId, TableId};
+use datafusion::{
+ error::DataFusionError,
+ physical_plan::{RecordBatchStream, SendableRecordBatchStream},
+};
+use futures::Stream;
+use iox_time::{SystemProvider, Time, TimeProvider};
+use metric::{DurationHistogram, Metric, U64Histogram, U64HistogramOptions};
+use observability_deps::tracing::debug;
+use pin_project::{pin_project, pinned_drop};
+use trace::span::Span;
+
+use crate::query::{
+ partition_response::PartitionResponse,
+ response::{PartitionStream, QueryResponse},
+ QueryError, QueryExec,
+};
+
+/// A [`QueryExec`] decorator adding instrumentation to the [`QueryResponse`]
+/// returned by the inner implementation.
+///
+/// The wall-clock duration of time taken for the caller to consume or drop the
+/// query results is recorded, faceted by success/error and completion state
+/// (fully consumed all [`RecordBatch`], or dropped before the stream ended).
+///
+/// Additionally the distribution of row, partition and [`RecordBatch`] counts
+/// are recorded.
+#[derive(Debug, Clone)]
+pub(crate) struct QueryResultInstrumentation<T, P = SystemProvider> {
+ inner: T,
+ time_provider: P,
+
+ /// A histogram to capture the consume time for a stream that was entirely
+ /// consumed (yielded [`Poll::Ready(None)`]) without ever observing an
+ /// [`Err`].
+ completed_ok: DurationHistogram,
+
+ /// As above but the stream returned at least one [`Err`] item; the stream
+ /// was still consumed to completion.
+ completed_err: DurationHistogram,
+
+ /// Like [`Self::completed_ok`], but for a stream that was not consumed to
+    /// completion (dropped before returning [`Poll::Ready(None)`]).
+ aborted_ok: DurationHistogram,
+ aborted_err: DurationHistogram,
+
+ // Histograms to capture the distribution of row/batch/partition
+ // counts per query at the end of the query.
+ row_hist: U64Histogram,
+ record_batch_hist: U64Histogram,
+ partition_hist: U64Histogram,
+}
+
+impl<T> QueryResultInstrumentation<T> {
+ pub(crate) fn new(inner: T, metrics: &metric::Registry) -> Self {
+ let duration: Metric<DurationHistogram> = metrics.register_metric(
+ "ingester_query_stream_duration",
+ "duration of time RPC clients take to stream results for a single query",
+ );
+
+ // A wide range of bucket values to capture the highly variable row
+ // count.
+ let row_hist: U64Histogram = metrics
+ .register_metric_with_options::<U64Histogram, _>(
+ "ingester_query_result_row",
+ "distribution of query result row count",
+ || {
+ U64HistogramOptions::new([
+ 1 << 5, // 32
+ 1 << 6, // 64
+ 1 << 7, // 128
+ 1 << 8, // 256
+ 1 << 9, // 512
+ 1 << 10, // 1,024
+ 1 << 11, // 2,048
+ 1 << 12, // 4,096
+ 1 << 13, // 8,192
+ 1 << 14, // 16,384
+ 1 << 15, // 32,768
+ 1 << 16, // 65,536
+ 1 << 17, // 131,072
+ 1 << 18, // 262,144
+ ])
+ },
+ )
+ .recorder(&[]);
+
+ let record_batch_hist: U64Histogram = metrics
+ .register_metric_with_options::<U64Histogram, _>(
+ "ingester_query_result_record_batch",
+ "distribution of query result record batch count",
+ || {
+ U64HistogramOptions::new([
+ 1 << 1, // 2
+ 1 << 2, // 4
+ 1 << 3, // 8
+ 1 << 4, // 16
+ 1 << 5, // 32
+ 1 << 6, // 64
+ 1 << 7, // 128
+ 1 << 8, // 256
+ ])
+ },
+ )
+ .recorder(&[]);
+
+ // And finally, the number of partitions
+ let partition_hist: U64Histogram = metrics
+ .register_metric_with_options::<U64Histogram, _>(
+ "ingester_query_result_partition",
+ "distribution of query result partition count",
+ || U64HistogramOptions::new([1, 2, 3, 4, 5]),
+ )
+ .recorder(&[]);
+
+ Self {
+ inner,
+ time_provider: Default::default(),
+ completed_ok: duration.recorder(&[("request", "complete"), ("has_error", "false")]),
+ completed_err: duration.recorder(&[("request", "complete"), ("has_error", "true")]),
+ aborted_ok: duration.recorder(&[("request", "incomplete"), ("has_error", "false")]),
+ aborted_err: duration.recorder(&[("request", "incomplete"), ("has_error", "true")]),
+ row_hist,
+ record_batch_hist,
+ partition_hist,
+ }
+ }
+}
+
+impl<T, P> QueryResultInstrumentation<T, P> {
+ #[cfg(test)]
+ fn with_time_provider<U>(self, time_provider: U) -> QueryResultInstrumentation<T, U>
+ where
+ U: TimeProvider,
+ {
+ QueryResultInstrumentation {
+ inner: self.inner,
+ time_provider,
+ completed_ok: self.completed_ok,
+ completed_err: self.completed_err,
+ aborted_ok: self.aborted_ok,
+ aborted_err: self.aborted_err,
+ row_hist: self.row_hist,
+ record_batch_hist: self.record_batch_hist,
+ partition_hist: self.partition_hist,
+ }
+ }
+}
+
+#[async_trait]
+impl<T, P> QueryExec for QueryResultInstrumentation<T, P>
+where
+ T: QueryExec<Response = QueryResponse>,
+ P: TimeProvider + Clone,
+{
+ type Response = QueryResponse;
+
+ async fn query_exec(
+ &self,
+ namespace_id: NamespaceId,
+ table_id: TableId,
+ columns: Vec<String>,
+ span: Option<Span>,
+ ) -> Result<Self::Response, QueryError> {
+ let started_at = self.time_provider.now();
+
+ let stream = self
+ .inner
+ .query_exec(namespace_id, table_id, columns, span)
+ .await?;
+
+ let stream = QueryMetricContext::new(
+ stream.into_partition_stream(),
+ started_at,
+ self.time_provider.clone(),
+ self.completed_ok.clone(),
+ self.completed_err.clone(),
+ self.aborted_ok.clone(),
+ self.aborted_err.clone(),
+ self.row_hist.clone(),
+ self.record_batch_hist.clone(),
+ self.partition_hist.clone(),
+ );
+
+ Ok(QueryResponse::new(PartitionStream::new(stream)))
+ }
+}
+
+/// A metric context for the lifetime of a [`QueryResponse`].
+///
+/// Once the last [`PartitionResponse`] is consumed to completion, this type is
+/// dropped and the metrics it has gathered are emitted at drop time.
+///
+/// This type is responsible for decorating all [`PartitionResponse`] yielded
+/// from the result stream with [`BatchStreamRecorder`] instances, in turn
+/// capturing the statistics of each [`RecordBatch`] in the
+/// [`PartitionResponse`].
+#[pin_project(PinnedDrop)]
+#[derive(Debug)]
+struct QueryMetricContext<S, P = SystemProvider>
+where
+ P: TimeProvider,
+{
+ time_provider: P,
+
+ /// The instrumented stream.
+ #[pin]
+ inner: S,
+
+ /// The metric state shared with child [`BatchStreamRecorder`] instances.
+ state: Arc<MetricState>,
+
+ /// The timestamp at which the read request began, inclusive of the work
+ /// required to acquire the inner stream (which may involve fetching all the
+ /// data if the result is only pretending to be a stream).
+ started_at: Time,
+ /// The timestamp at which the stream completed (yielding
+ /// [`Poll::Ready(None)`]).
+ ///
+ /// [`None`] if the stream has not yet completed.
+ completed_at: Option<Time>,
+
+ /// The running count of partitions yielded by this query.
+ partition_count: usize,
+
+ /// The latency histograms faceted by completion/error state.
+ completed_ok: DurationHistogram,
+ completed_err: DurationHistogram,
+ aborted_ok: DurationHistogram,
+ aborted_err: DurationHistogram,
+
+ /// Row/record batch/partition count distribution histograms.
+ row_hist: U64Histogram,
+ record_batch_hist: U64Histogram,
+ partition_hist: U64Histogram,
+}
+
+impl<S, P> QueryMetricContext<S, P>
+where
+ P: TimeProvider,
+{
+ #[allow(clippy::too_many_arguments)]
+ fn new(
+ stream: S,
+ started_at: Time,
+ time_provider: P,
+ completed_ok: DurationHistogram,
+ completed_err: DurationHistogram,
+ aborted_ok: DurationHistogram,
+ aborted_err: DurationHistogram,
+ row_hist: U64Histogram,
+ record_batch_hist: U64Histogram,
+ partition_hist: U64Histogram,
+ ) -> Self {
+ Self {
+ inner: stream,
+ time_provider,
+ started_at,
+ completed_at: None,
+ completed_ok,
+ completed_err,
+ aborted_ok,
+ aborted_err,
+ row_hist,
+ record_batch_hist,
+ partition_hist,
+ partition_count: 0,
+ state: Default::default(),
+ }
+ }
+}
+
+impl<S, P> Stream for QueryMetricContext<S, P>
+where
+ S: Stream<Item = PartitionResponse> + Send,
+ P: TimeProvider,
+{
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+
+ match this.inner.poll_next(cx) {
+ Poll::Ready(Some(p)) => {
+ // Instrument the RecordBatch stream in this partition.
+ *this.partition_count += 1;
+
+ // Extract all the fields of the PartitionResponse
+ let id = p.id();
+ let persist_count = p.completed_persistence_count();
+
+ // And wrap the underlying stream of RecordBatch for this
+ // partition with a metric observer.
+ let record_stream = p.into_record_batch_stream().map(|s| {
+ Box::pin(BatchStreamRecorder::new(s, Arc::clone(this.state)))
+ as SendableRecordBatchStream
+ });
+
+ Poll::Ready(Some(PartitionResponse::new(
+ record_stream,
+ id,
+ persist_count,
+ )))
+ }
+ Poll::Ready(None) => {
+ // Record the wall clock timestamp of the stream end.
+ *this.completed_at = Some(this.time_provider.now());
+ Poll::Ready(None)
+ }
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[pinned_drop]
+impl<S, P> PinnedDrop for QueryMetricContext<S, P>
+where
+ P: TimeProvider,
+{
+ fn drop(self: Pin<&mut Self>) {
+ // Record the captured metrics.
+ let did_observe_error = self.state.did_observe_error.load(Ordering::Relaxed);
+ let row_count = self.state.row_count.load(Ordering::Relaxed) as u64;
+ let record_batch_count = self.state.record_batch_count.load(Ordering::Relaxed) as u64;
+ let partition_count = self.partition_count;
+
+ // Record the row/record batch/partition counts for this query.
+ self.row_hist.record(row_count);
+ self.record_batch_hist.record(record_batch_count);
+ self.partition_hist.record(partition_count as _);
+
+ // Select the appropriate histogram based off of the completion & error
+ // state.
+ //
+ // If completed_at is None, the stream was aborted before completion.
+ let hist = match self.completed_at {
+ Some(_) if !did_observe_error => &self.completed_ok,
+ Some(_) => &self.completed_err,
+ None if !did_observe_error => &self.aborted_ok,
+ None => &self.aborted_err,
+ };
+
+ // Record the duration, either up to the time of stream completion, or
+ // now if the stream did not complete.
+ let duration = self
+ .completed_at
+ .unwrap_or_else(|| self.time_provider.now())
+ .checked_duration_since(self.started_at);
+ if let Some(d) = duration {
+ hist.record(d)
+ }
+
+ // Log a helpful debug message for query correlation purposes.
+ match self.completed_at {
+ Some(_) => debug!(
+ ?duration,
+ did_observe_error,
+ row_count,
+ record_batch_count,
+ partition_count,
+ "completed streaming query results",
+ ),
+ None => debug!(
+ ?duration,
+ did_observe_error,
+ row_count,
+ record_batch_count,
+ partition_count,
+ "aborted streaming query results",
+ ),
+ };
+ }
+}
+
+/// State shared between the parent [`QueryMetricContext`] and all of the child
+/// [`BatchStreamRecorder`] it has instantiated.
+#[derive(Debug, Default)]
+struct MetricState {
+ /// True if at least one [`Result`] yielded by this result stream so far has
+ /// been an [`Err`].
+ //
+ /// This is used to select the correct success/error histogram which records
+ /// the operation duration.
+ did_observe_error: AtomicBool,
+
+ /// Running counts of row, partition, and [`RecordBatch`]
+ /// returned for this query so far.
+ row_count: AtomicUsize,
+ record_batch_count: AtomicUsize,
+}
+
+/// Capture row/[`RecordBatch`]/error statistics.
+///
+/// Inspects each [`RecordBatch`] yielded in the result stream, scoped to a
+/// single [`PartitionResponse`].
+#[pin_project]
+struct BatchStreamRecorder {
+ #[pin]
+ inner: SendableRecordBatchStream,
+ shared_state: Arc<MetricState>,
+}
+
+impl BatchStreamRecorder {
+ fn new(stream: SendableRecordBatchStream, shared_state: Arc<MetricState>) -> Self {
+ Self {
+ inner: stream,
+ shared_state,
+ }
+ }
+}
+
+impl RecordBatchStream for BatchStreamRecorder {
+ fn schema(&self) -> arrow::datatypes::SchemaRef {
+ self.inner.schema()
+ }
+}
+
+impl Stream for BatchStreamRecorder {
+ type Item = Result<RecordBatch, DataFusionError>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+
+ let res = this.inner.poll_next(cx);
+ match &res {
+ Poll::Ready(Some(Ok(batch))) => {
+ // Record the count statistics in this batch.
+ this.shared_state
+ .row_count
+ .fetch_add(batch.num_rows(), Ordering::Relaxed);
+ this.shared_state
+ .record_batch_count
+ .fetch_add(1, Ordering::Relaxed);
+ }
+ Poll::Ready(Some(Err(_e))) => {
+ // Record that at least one poll returned an error.
+ this.shared_state
+ .did_observe_error
+ .store(true, Ordering::Relaxed);
+ }
+ Poll::Ready(None) => {}
+ Poll::Pending => {}
+ }
+
+ res
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Impl the default size_hint() so this wrapper doesn't mask the size
+ // hint from the inner stream, if any.
+ self.inner.size_hint()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::{sync::Arc, time::Duration};
+
+ use crate::{make_batch, make_partition_stream, query::mock_query_exec::MockQueryExec};
+
+ use super::*;
+
+ use arrow::array::{Float32Array, Int64Array};
+ use data_types::PartitionId;
+ use datafusion::physical_plan::stream::RecordBatchStreamAdapter;
+ use futures::{stream, StreamExt};
+ use iox_time::MockProvider;
+ use metric::Attributes;
+
+ const NAMESPACE_ID: NamespaceId = NamespaceId::new(42);
+ const TABLE_ID: TableId = TableId::new(42);
+ const TIME_STEP: Duration = Duration::from_secs(42);
+
+ /// A concise helper to assert the value of a metric histogram, regardless
+ /// of underlying type.
+ macro_rules! assert_histogram {
+ (
+ $metrics:ident,
+ $hist:ty,
+ $name:literal,
+ $(labels = $attr:expr,)*
+ $(samples = $samples:expr,)*
+ $(sum = $sum:expr,)*
+ ) => {
+ // Default to an empty set of attributes if not specified.
+ #[allow(unused)]
+ let mut attr = None;
+ $(attr = Some($attr);)*
+ let attr = attr.unwrap_or_else(|| Attributes::from(&[]));
+
+ let hist = $metrics
+ .get_instrument::<Metric<$hist>>($name)
+ .expect("failed to find metric with provided name")
+ .get_observer(&attr)
+ .expect("failed to find metric with provided attributes")
+ .fetch();
+
+ $(assert_eq!(hist.sample_count(), $samples, "sample count mismatch");)*
+ $(assert_eq!(hist.total, $sum, "sum value mismatch");)*
+ };
+ }
+
+ /// A query against a table that has been persisted / no longer contains any
+ /// data (only metadata).
+ #[tokio::test]
+ async fn test_multi_partition_stream_no_batches() {
+ let metrics = metric::Registry::default();
+
+ // Construct a stream with no batches.
+ let stream = PartitionStream::new(stream::iter([PartitionResponse::new(
+ None,
+ PartitionId::new(42),
+ 42,
+ )]));
+
+ let mock_time = Arc::new(MockProvider::new(Time::MIN));
+ let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream)));
+ let layer = QueryResultInstrumentation::new(mock_inner, &metrics)
+ .with_time_provider(Arc::clone(&mock_time));
+
+ let response = layer
+ .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None)
+ .await
+ .expect("query should succeed");
+
+ // Now the response has been created, advance the clock
+ mock_time.inc(TIME_STEP);
+
+ // Drain the query results, moving past any errors, and collecting the
+ // final set of all Ok record batches for comparison.
+ let _batches = response
+ .into_record_batches()
+ .filter_map(|v| async { v.ok() })
+ .collect::<Vec<_>>()
+ .await;
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_row",
+ samples = 1,
+ sum = 0,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_record_batch",
+ samples = 1,
+ sum = 0,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_partition",
+ samples = 1,
+ sum = 1, // A partition was yielded, but contained no data
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]),
+ samples = 1,
+ sum = TIME_STEP,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ }
+
+ /// A happy path test - a stream is initialised and read to completion.
+ ///
+ /// The response includes data from multiple partitions, with multiple
+ /// record batches.
+ #[tokio::test]
+ async fn test_multi_partition_stream_ok() {
+ let metrics = metric::Registry::default();
+
+ // Construct the set of partitions and their record batches
+ let stream = make_partition_stream!(
+ PartitionId::new(1) => [
+ make_batch!(
+ Int64Array("a" => vec![1, 2, 3, 4, 5]),
+ Float32Array("b" => vec![4.1, 4.2, 4.3, 4.4, 5.0]),
+ ),
+ make_batch!(
+ Int64Array("c" => vec![1, 2, 3, 4, 5]),
+ ),
+ ],
+ PartitionId::new(2) => [
+ make_batch!(
+ Float32Array("d" => vec![1.1]),
+ ),
+ ],
+ );
+
+ let mock_time = Arc::new(MockProvider::new(Time::MIN));
+ let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream)));
+ let layer = QueryResultInstrumentation::new(mock_inner, &metrics)
+ .with_time_provider(Arc::clone(&mock_time));
+
+ let response = layer
+ .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None)
+ .await
+ .expect("query should succeed");
+
+ // Now the response has been created, advance the clock
+ mock_time.inc(TIME_STEP);
+
+ // Drain the query results, moving past any errors, and collecting the
+ // final set of all Ok record batches for comparison.
+ let _batches = response
+ .into_record_batches()
+ .filter_map(|v| async { v.ok() })
+ .collect::<Vec<_>>()
+ .await;
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_row",
+ samples = 1,
+ sum = 11, // 5 + 5 + 1
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_record_batch",
+ samples = 1,
+ sum = 3,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_partition",
+ samples = 1,
+ sum = 2,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]),
+ samples = 1,
+ sum = TIME_STEP,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ }
+
+ /// A query result which is dropped immediately does not record any
+ /// rows/batches/etc (the client did not see them) but DOES record the wall
+ /// clock duration between obtaining the query result and aborting the read.
+ #[tokio::test]
+ async fn test_multi_partition_stream_aborted_immediately() {
+ let metrics = metric::Registry::default();
+
+ // Construct the set of partitions and their record batches
+ let stream = make_partition_stream!(
+ PartitionId::new(1) => [
+ make_batch!(
+ Int64Array("a" => vec![1, 2, 3, 4, 5]),
+ Float32Array("b" => vec![4.1, 4.2, 4.3, 4.4, 5.0]),
+ ),
+ make_batch!(
+ Int64Array("c" => vec![1, 2, 3, 4, 5]),
+ ),
+ ],
+ PartitionId::new(2) => [
+ make_batch!(
+ Float32Array("d" => vec![1.1]),
+ ),
+ ],
+ );
+
+ let mock_time = Arc::new(MockProvider::new(Time::MIN));
+ let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream)));
+ let layer = QueryResultInstrumentation::new(mock_inner, &metrics)
+ .with_time_provider(Arc::clone(&mock_time));
+
+ let response = layer
+ .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None)
+ .await
+ .expect("query should succeed");
+
+ // Now the response has been created, advance the clock
+ mock_time.inc(TIME_STEP);
+
+ // Drop the response without reading it to completion (or at all,
+ // really...)
+ drop(response);
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_row",
+ samples = 1,
+ sum = 0,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_record_batch",
+ samples = 1,
+ sum = 0,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_partition",
+ samples = 1,
+ sum = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]),
+ samples = 1,
+ sum = TIME_STEP, // It was recorded as an incomplete request
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ }
+
+ /// A query result which is dropped after partially reading the data should
+ /// record rows/batches/etc (as many as the client saw) and record the wall
+ /// clock duration between obtaining the query result and aborting the read.
+ #[tokio::test]
+ async fn test_multi_partition_stream_aborted_after_read() {
+ let metrics = metric::Registry::default();
+
+ // Construct the set of partitions and their record batches
+ let stream = make_partition_stream!(
+ PartitionId::new(1) => [
+ make_batch!(
+ Int64Array("a" => vec![1, 2, 3, 4, 5]),
+ Float32Array("b" => vec![4.1, 4.2, 4.3, 4.4, 5.0]),
+ ),
+ make_batch!(
+ Int64Array("c" => vec![1, 2, 3, 4, 5]),
+ ),
+ ],
+ PartitionId::new(2) => [
+ make_batch!(
+ Float32Array("d" => vec![1.1]),
+ ),
+ ],
+ );
+
+ let mock_time = Arc::new(MockProvider::new(Time::MIN));
+ let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream)));
+ let layer = QueryResultInstrumentation::new(mock_inner, &metrics)
+ .with_time_provider(Arc::clone(&mock_time));
+
+ let response = layer
+ .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None)
+ .await
+ .expect("query should succeed");
+
+ // Now the response has been created, advance the clock
+ mock_time.inc(TIME_STEP);
+
+ let mut response = response.into_record_batches();
+ let got = response
+ .next()
+ .await
+ .expect("should yield first batch")
+ .expect("mock doesn't return error");
+ drop(response);
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_row",
+ samples = 1,
+ sum = got.num_rows() as u64,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_record_batch",
+ samples = 1,
+ sum = 1,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_partition",
+ samples = 1,
+ sum = 1,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]),
+ samples = 1,
+ sum = TIME_STEP, // It was recorded as an incomplete request
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ }
+
+ /// A query result which is dropped when observing an error should record
+ /// the various count statistics from any yielded batches and categorise the
+ /// result as having observed an error.
+ #[tokio::test]
+ async fn test_multi_partition_stream_with_error_abort() {
+ let metrics = metric::Registry::default();
+
+ // Construct the set of partitions and their record batches
+ let (ok_batch, schema) = make_batch!(
+ Int64Array("c" => vec![1, 2, 3, 4, 5]),
+ );
+
+ let stream = Box::pin(RecordBatchStreamAdapter::new(
+ schema,
+ stream::iter([
+ Ok(ok_batch.clone()),
+ Err(DataFusionError::Internal("bananas".to_string())),
+ Ok(ok_batch),
+ ]),
+ )) as SendableRecordBatchStream;
+
+ let stream = PartitionStream::new(stream::iter([PartitionResponse::new(
+ Some(stream),
+ PartitionId::new(1),
+ 42,
+ )]));
+
+ let mock_time = Arc::new(MockProvider::new(Time::MIN));
+ let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream)));
+ let layer = QueryResultInstrumentation::new(mock_inner, &metrics)
+ .with_time_provider(Arc::clone(&mock_time));
+
+ let response = layer
+ .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None)
+ .await
+ .expect("query should succeed");
+
+ // Now the response has been created, advance the clock
+ mock_time.inc(TIME_STEP);
+
+ let mut response = response.into_record_batches();
+ let got = response
+ .next()
+ .await
+ .expect("should yield first batch")
+ .expect("mock doesn't return error");
+
+ response
+ .next()
+ .await
+ .expect("more results should be available")
+ .expect_err("this batch should be an error");
+
+ // Drop the rest of the batches after observing an error.
+ drop(response);
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_row",
+ samples = 1,
+ sum = got.num_rows() as u64,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_record_batch",
+ samples = 1,
+ sum = 1,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_partition",
+ samples = 1,
+ sum = 1,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]),
+ samples = 1,
+ sum = TIME_STEP, // Recorded as an incomplete request with error
+ );
+ }
+
+ /// A query result which is consumed to completion even after observing an
+    /// error should be correctly categorised.
+ #[tokio::test]
+ async fn test_multi_partition_stream_with_error_completion() {
+ let metrics = metric::Registry::default();
+
+ // Construct the set of partitions and their record batches
+ let (ok_batch, schema) = make_batch!(
+ Int64Array("c" => vec![1, 2, 3, 4, 5]),
+ );
+
+ let stream = Box::pin(RecordBatchStreamAdapter::new(
+ schema,
+ stream::iter([
+ Ok(ok_batch.clone()),
+ Err(DataFusionError::Internal("bananas".to_string())),
+ Ok(ok_batch),
+ ]),
+ )) as SendableRecordBatchStream;
+
+ let stream = PartitionStream::new(stream::iter([PartitionResponse::new(
+ Some(stream),
+ PartitionId::new(1),
+ 42,
+ )]));
+
+ let mock_time = Arc::new(MockProvider::new(Time::MIN));
+ let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream)));
+ let layer = QueryResultInstrumentation::new(mock_inner, &metrics)
+ .with_time_provider(Arc::clone(&mock_time));
+
+ let response = layer
+ .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None)
+ .await
+ .expect("query should succeed");
+
+ // Now the response has been created, advance the clock
+ mock_time.inc(TIME_STEP);
+
+ // Drain the query results, moving past any errors, and collecting the
+ // final set of all Ok record batches for comparison.
+ let _batches = response
+ .into_record_batches()
+ .filter_map(|v| async { v.ok() })
+ .collect::<Vec<_>>()
+ .await;
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_row",
+ samples = 1,
+ sum = 10, // 5 + 5
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_record_batch",
+ samples = 1,
+ sum = 2,
+ );
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_query_result_partition",
+ samples = 1,
+ sum = 1,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]),
+ samples = 1,
+ sum = TIME_STEP, // Recorded as a complete request with error
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]),
+ samples = 0,
+ );
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_query_stream_duration",
+ labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]),
+ samples = 0,
+ );
+ }
+}
|
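One small piece of the diff above worth isolating: the parent context shares a set of atomic counters with every per-partition recorder via an `Arc`. A sketch of that idea, with illustrative names only:

```rust
// Sketch: parent-owned state shared with many child recorders via Arc.
use std::sync::{
    atomic::{AtomicBool, AtomicUsize, Ordering},
    Arc,
};

#[derive(Default)]
struct MetricState {
    row_count: AtomicUsize,
    did_observe_error: AtomicBool,
}

struct Recorder {
    shared: Arc<MetricState>,
}

impl Recorder {
    fn observe_batch(&self, rows: usize) {
        self.shared.row_count.fetch_add(rows, Ordering::Relaxed);
    }

    fn observe_error(&self) {
        self.shared.did_observe_error.store(true, Ordering::Relaxed);
    }
}

fn main() {
    let state = Arc::new(MetricState::default());
    // One recorder per partition, all feeding the same shared state.
    let recorders: Vec<_> = (0..3)
        .map(|_| Recorder {
            shared: Arc::clone(&state),
        })
        .collect();
    recorders[0].observe_batch(5);
    recorders[1].observe_batch(7);
    recorders[2].observe_error();
    assert_eq!(state.row_count.load(Ordering::Relaxed), 12);
    assert!(state.did_observe_error.load(Ordering::Relaxed));
}
```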
435499e9d79eb0f2584f08cbdf5f37d543dfd854
|
Dom Dwyer
|
2023-04-14 14:08:42
|
resolve Arc-wrapped PartitionData
|
Changes the PartitionResolver trait to return a ref-counted
PartitionData instance, instead of a plain PartitionData (which is then
wrapped in an Arc anyway).
This allows resolver implementations to return multiple references to
the same physical instance.
| null |
refactor: resolve Arc-wrapped PartitionData
Changes the PartitionResolver trait to return a ref-counted
PartitionData instance, instead of a plain PartitionData (which is then
wrapped in an Arc anyway).
This allows resolver implementations to return multiple references to
the same physical instance.
|
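A minimal sketch of the shape this commit describes, shown before the recorded diff: the resolver trait returns an `Arc<Mutex<...>>` so different callers can hold references to the same physical instance. Types and names here are hypothetical, not the ingester's real `PartitionProvider` API.

```rust
// Sketch: a resolver that hands out ref-counted, lock-wrapped partition state.
use std::sync::{Arc, Mutex};

struct PartitionData {
    rows_buffered: usize,
}

trait PartitionProvider {
    fn get_partition(&self, key: &str) -> Arc<Mutex<PartitionData>>;
}

struct SimpleProvider;

impl PartitionProvider for SimpleProvider {
    fn get_partition(&self, _key: &str) -> Arc<Mutex<PartitionData>> {
        // A real resolver could return a clone of an existing Arc here, so
        // every caller observes the same buffered state.
        Arc::new(Mutex::new(PartitionData { rows_buffered: 0 }))
    }
}

fn main() {
    let provider = SimpleProvider;
    let p = provider.get_partition("2023-04-14");
    let p2 = Arc::clone(&p); // second reference to the same physical instance
    p.lock().unwrap().rows_buffered += 10;
    assert_eq!(p2.lock().unwrap().rows_buffered, 10);
}
```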
diff --git a/ingester2/src/buffer_tree/partition/resolver/cache.rs b/ingester2/src/buffer_tree/partition/resolver/cache.rs
index 9cc2c546d4..360d1c4a7f 100644
--- a/ingester2/src/buffer_tree/partition/resolver/cache.rs
+++ b/ingester2/src/buffer_tree/partition/resolver/cache.rs
@@ -167,7 +167,7 @@ where
table_id: TableId,
table_name: Arc<DeferredLoad<TableName>>,
transition_shard_id: ShardId,
- ) -> PartitionData {
+ ) -> Arc<Mutex<PartitionData>> {
// Use the cached PartitionKey instead of the caller's partition_key,
// instead preferring to reuse the already-shared Arc<str> in the cache.
@@ -188,7 +188,7 @@ where
// Use the returned partition key instead of the callers - this
// allows the backing str memory to be reused across all partitions
// using the same key!
- return PartitionData::new(
+ return Arc::new(Mutex::new(PartitionData::new(
partition_id,
key,
namespace_id,
@@ -197,7 +197,7 @@ where
table_name,
SortKeyState::Deferred(Arc::new(sort_key_resolver)),
transition_shard_id,
- );
+ )));
}
debug!(%table_id, %partition_key, "partition cache miss");
@@ -218,6 +218,9 @@ where
#[cfg(test)]
mod tests {
+ // Harmless in tests - saves a bunch of extra vars.
+ #![allow(clippy::await_holding_lock)]
+
use data_types::ShardId;
use iox_catalog::mem::MemCatalog;
@@ -282,10 +285,10 @@ mod tests {
)
.await;
- assert_eq!(got.partition_id(), PARTITION_ID);
- assert_eq!(got.table_id(), TABLE_ID);
- assert_eq!(&**got.table_name().get().await, TABLE_NAME);
- assert_eq!(&**got.namespace_name().get().await, NAMESPACE_NAME);
+ assert_eq!(got.lock().partition_id(), PARTITION_ID);
+ assert_eq!(got.lock().table_id(), TABLE_ID);
+ assert_eq!(&**got.lock().table_name().get().await, TABLE_NAME);
+ assert_eq!(&**got.lock().namespace_name().get().await, NAMESPACE_NAME);
assert!(cache.inner.is_empty());
}
@@ -322,11 +325,14 @@ mod tests {
)
.await;
- assert_eq!(got.partition_id(), PARTITION_ID);
- assert_eq!(got.table_id(), TABLE_ID);
- assert_eq!(&**got.table_name().get().await, TABLE_NAME);
- assert_eq!(&**got.namespace_name().get().await, NAMESPACE_NAME);
- assert_eq!(*got.partition_key(), PartitionKey::from(PARTITION_KEY));
+ assert_eq!(got.lock().partition_id(), PARTITION_ID);
+ assert_eq!(got.lock().table_id(), TABLE_ID);
+ assert_eq!(&**got.lock().table_name().get().await, TABLE_NAME);
+ assert_eq!(&**got.lock().namespace_name().get().await, NAMESPACE_NAME);
+ assert_eq!(
+ *got.lock().partition_key(),
+ PartitionKey::from(PARTITION_KEY)
+ );
// The cache should have been cleaned up as it was consumed.
assert!(cache.entries.lock().is_empty());
@@ -334,10 +340,10 @@ mod tests {
// Assert the partition key from the cache was used for the lifetime of
// the partition, so that it is shared with the cache + other partitions
// that share the same partition key across all tables.
- assert!(got.partition_key().ptr_eq(&stored_partition_key));
+ assert!(got.lock().partition_key().ptr_eq(&stored_partition_key));
// It does not use the short-lived caller's partition key (derived from
// the DML op it is processing).
- assert!(!got.partition_key().ptr_eq(&callers_partition_key));
+ assert!(!got.lock().partition_key().ptr_eq(&callers_partition_key));
}
#[tokio::test]
@@ -385,9 +391,9 @@ mod tests {
)
.await;
- assert_eq!(got.partition_id(), other_key_id);
- assert_eq!(got.table_id(), TABLE_ID);
- assert_eq!(&**got.table_name().get().await, TABLE_NAME);
+ assert_eq!(got.lock().partition_id(), other_key_id);
+ assert_eq!(got.lock().table_id(), TABLE_ID);
+ assert_eq!(&**got.lock().table_name().get().await, TABLE_NAME);
}
#[tokio::test]
@@ -434,8 +440,8 @@ mod tests {
)
.await;
- assert_eq!(got.partition_id(), PARTITION_ID);
- assert_eq!(got.table_id(), other_table);
- assert_eq!(&**got.table_name().get().await, TABLE_NAME);
+ assert_eq!(got.lock().partition_id(), PARTITION_ID);
+ assert_eq!(got.lock().table_id(), other_table);
+ assert_eq!(&**got.lock().table_name().get().await, TABLE_NAME);
}
}
diff --git a/ingester2/src/buffer_tree/partition/resolver/catalog.rs b/ingester2/src/buffer_tree/partition/resolver/catalog.rs
index c105fda28a..e00103b877 100644
--- a/ingester2/src/buffer_tree/partition/resolver/catalog.rs
+++ b/ingester2/src/buffer_tree/partition/resolver/catalog.rs
@@ -8,6 +8,7 @@ use backoff::{Backoff, BackoffConfig};
use data_types::{NamespaceId, Partition, PartitionKey, ShardId, TableId};
use iox_catalog::interface::Catalog;
use observability_deps::tracing::debug;
+use parking_lot::Mutex;
use super::r#trait::PartitionProvider;
use crate::{
@@ -63,7 +64,7 @@ impl PartitionProvider for CatalogPartitionResolver {
table_id: TableId,
table_name: Arc<DeferredLoad<TableName>>,
transition_shard_id: ShardId,
- ) -> PartitionData {
+ ) -> Arc<Mutex<PartitionData>> {
debug!(
%partition_key,
%table_id,
@@ -78,7 +79,7 @@ impl PartitionProvider for CatalogPartitionResolver {
.await
.expect("retry forever");
- PartitionData::new(
+ Arc::new(Mutex::new(PartitionData::new(
p.id,
// Use the caller's partition key instance, as it MAY be shared with
// other instance, but the instance returned from the catalog
@@ -90,12 +91,15 @@ impl PartitionProvider for CatalogPartitionResolver {
table_name,
SortKeyState::Provided(p.sort_key()),
transition_shard_id,
- )
+ )))
}
}
#[cfg(test)]
mod tests {
+ // Harmless in tests - saves a bunch of extra vars.
+ #![allow(clippy::await_holding_lock)]
+
use std::{sync::Arc, time::Duration};
use assert_matches::assert_matches;
@@ -157,18 +161,18 @@ mod tests {
.await;
// Ensure the table name is available.
- let _ = got.table_name().get().await;
+ let _ = got.lock().table_name().get().await;
- assert_eq!(got.namespace_id(), namespace_id);
- assert_eq!(got.table_name().to_string(), table_name.to_string());
- assert_matches!(got.sort_key(), SortKeyState::Provided(None));
- assert!(got.partition_key.ptr_eq(&callers_partition_key));
+ assert_eq!(got.lock().namespace_id(), namespace_id);
+ assert_eq!(got.lock().table_name().to_string(), table_name.to_string());
+ assert_matches!(got.lock().sort_key(), SortKeyState::Provided(None));
+ assert!(got.lock().partition_key.ptr_eq(&callers_partition_key));
let got = catalog
.repositories()
.await
.partitions()
- .get_by_id(got.partition_id)
+ .get_by_id(got.lock().partition_id)
.await
.unwrap()
.expect("partition not created");
diff --git a/ingester2/src/buffer_tree/partition/resolver/mock.rs b/ingester2/src/buffer_tree/partition/resolver/mock.rs
index 9b25d8554e..a50940ce10 100644
--- a/ingester2/src/buffer_tree/partition/resolver/mock.rs
+++ b/ingester2/src/buffer_tree/partition/resolver/mock.rs
@@ -55,7 +55,7 @@ impl PartitionProvider for MockPartitionProvider {
table_id: TableId,
table_name: Arc<DeferredLoad<TableName>>,
_transition_shard_id: ShardId,
- ) -> PartitionData {
+ ) -> Arc<Mutex<PartitionData>> {
let p = self
.partitions
.lock()
@@ -67,6 +67,6 @@ impl PartitionProvider for MockPartitionProvider {
assert_eq!(p.namespace_id(), namespace_id);
assert_eq!(p.namespace_name().to_string(), namespace_name.to_string());
assert_eq!(p.table_name().to_string(), table_name.to_string());
- p
+ Arc::new(Mutex::new(p))
}
}
diff --git a/ingester2/src/buffer_tree/partition/resolver/trait.rs b/ingester2/src/buffer_tree/partition/resolver/trait.rs
index 80322380b9..a40f1fd4dc 100644
--- a/ingester2/src/buffer_tree/partition/resolver/trait.rs
+++ b/ingester2/src/buffer_tree/partition/resolver/trait.rs
@@ -2,6 +2,7 @@ use std::{fmt::Debug, sync::Arc};
use async_trait::async_trait;
use data_types::{NamespaceId, PartitionKey, ShardId, TableId};
+use parking_lot::Mutex;
use crate::{
buffer_tree::{namespace::NamespaceName, partition::PartitionData, table::TableName},
@@ -25,7 +26,7 @@ pub(crate) trait PartitionProvider: Send + Sync + Debug {
table_id: TableId,
table_name: Arc<DeferredLoad<TableName>>,
transition_shard_id: ShardId,
- ) -> PartitionData;
+ ) -> Arc<Mutex<PartitionData>>;
}
#[async_trait]
@@ -41,7 +42,7 @@ where
table_id: TableId,
table_name: Arc<DeferredLoad<TableName>>,
transition_shard_id: ShardId,
- ) -> PartitionData {
+ ) -> Arc<Mutex<PartitionData>> {
(**self)
.get_partition(
partition_key,
@@ -101,9 +102,12 @@ mod tests {
TRANSITION_SHARD_ID,
)
.await;
- assert_eq!(got.partition_id(), partition);
- assert_eq!(got.namespace_id(), namespace_id);
- assert_eq!(got.namespace_name().to_string(), namespace_name.to_string());
- assert_eq!(got.table_name().to_string(), table_name.to_string());
+ assert_eq!(got.lock().partition_id(), partition);
+ assert_eq!(got.lock().namespace_id(), namespace_id);
+ assert_eq!(
+ got.lock().namespace_name().to_string(),
+ namespace_name.to_string()
+ );
+ assert_eq!(got.lock().table_name().to_string(), table_name.to_string());
}
}
diff --git a/ingester2/src/buffer_tree/table.rs b/ingester2/src/buffer_tree/table.rs
index cda86a311c..0a07ce8b88 100644
--- a/ingester2/src/buffer_tree/table.rs
+++ b/ingester2/src/buffer_tree/table.rs
@@ -183,8 +183,7 @@ where
//
// This MAY return a different instance than `p` if another
// thread has already initialised the partition.
- self.partition_data
- .get_or_insert_with(&partition_key, || Arc::new(Mutex::new(p)))
+ self.partition_data.get_or_insert_with(&partition_key, || p)
}
};
|