| Commit Hash | Author | Date | Description | Body | Footers | Commit Message | Git Diff |
|---|---|---|---|---|---|---|---|
8bcc7522d06e023cb92e63cd831a408b270c6be3
|
Paul Dix
|
2024-08-09 08:46:35
|
Add last cache create/delete to WAL (#25233)
|
* feat: Add last cache create/delete to WAL
This moves the LastCacheDefinition into the WAL so that it can be serialized there. This ended up being a pretty large refactor to get the last cache creation to work through the WAL.
I think I also stumbled on a bug where the last cache wasn't getting initialized from the catalog on reboot, meaning it wouldn't actually end up caching values. The refactored last cache persistence test in write_buffer/mod.rs surfaced this.
Finally, I also had to update the WAL so that it would persist if there were only catalog updates and no writes.
Fixes #25203
* fix: typos
| null |
feat: Add last cache create/delete to WAL (#25233)
* feat: Add last cache create/delete to WAL
This moves the LastCacheDefinition into the WAL so that it can be serialized there. This ended up being a pretty large refactor to get the last cache creation to work through the WAL.
I think I also stumbled on a bug where the last cache wasn't getting initialized from the catalog on reboot, meaning it wouldn't actually end up caching values. The refactored last cache persistence test in write_buffer/mod.rs surfaced this.
Finally, I also had to update the WAL so that it would persist if there were only catalog updates and no writes.
Fixes #25203
* fix: typos
|
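As context for the diff that follows, here is a minimal sketch of what the refactor enables: a last-cache creation expressed as a catalog op for the WAL, using the `CatalogBatch`, `CatalogOp::CreateLastCache`, and `LastCacheDefinition` types the diff moves into `influxdb3_wal`. The database, table, cache, and column names are illustrative; the function only builds the op, and actually writing it via the WAL happens as shown in the diff.

```rust
// Sketch only: build the WAL catalog op for a last-cache creation, mirroring the types
// added/moved in the diff below. All string names here are illustrative.
use std::sync::Arc;

use influxdb3_wal::{CatalogBatch, CatalogOp, LastCacheDefinition, WalOp};

fn last_cache_create_op(time_ns: i64) -> Result<WalOp, influxdb3_wal::Error> {
    // Cache the most recent value per host for a hypothetical "cpu" table,
    // storing all non-key columns, with a one-hour TTL.
    let definition = LastCacheDefinition::new_all_non_key_value_columns(
        "cpu",      // table
        "cpu_last", // cache name
        ["host"],   // key columns
        1,          // count: number of last values to hold (1..=10)
        3600,       // ttl in seconds
    )?;

    // Wrapping the definition in a CatalogBatch is what makes the cache durable:
    // it is serialized into the WAL and replayed into the catalog (and the
    // LastCacheProvider) on restart.
    Ok(WalOp::Catalog(CatalogBatch {
        database_name: Arc::from("db"),
        time_ns,
        ops: vec![CatalogOp::CreateLastCache(definition)],
    }))
}
```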
diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs
index c6b999b3fc..46ba356c4b 100644
--- a/influxdb3_catalog/src/catalog.rs
+++ b/influxdb3_catalog/src/catalog.rs
@@ -1,6 +1,6 @@
//! Implementation of the Catalog that sits entirely in memory.
-use influxdb3_wal::{CatalogBatch, CatalogOp};
+use influxdb3_wal::{CatalogBatch, CatalogOp, LastCacheDefinition};
use influxdb_line_protocol::FieldValue;
use observability_deps::tracing::info;
use parking_lot::RwLock;
@@ -32,9 +32,6 @@ pub enum Error {
Catalog::NUM_DBS_LIMIT
)]
TooManyDbs,
-
- #[error("last cache size must be from 1 to 10")]
- InvalidLastCacheSize,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -337,6 +334,25 @@ impl DatabaseSchema {
CatalogOp::CreateDatabase(_) => {
// Do nothing
}
+ CatalogOp::CreateLastCache(definition) => {
+ let table_name: Arc<str> = definition.table.as_str().into();
+ let table = tables.get_mut(table_name.as_ref());
+ match table {
+ Some(table) => {
+ table
+ .last_caches
+ .insert(definition.name.clone(), definition.clone());
+ }
+ None => panic!("table must exist before last cache creation"),
+ }
+ }
+ CatalogOp::DeleteLastCache(definition) => {
+ let table_name: Arc<str> = definition.table.as_str().into();
+ let table = tables.get_mut(table_name.as_ref());
+ if let Some(table) = table {
+ table.last_caches.remove(&definition.name);
+ }
+ }
}
}
@@ -503,129 +519,6 @@ impl TableDefinition {
}
}
-/// Defines a last cache in a given table and database
-#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
-pub struct LastCacheDefinition {
- /// The table name the cache is associated with
- pub table: String,
- /// Given name of the cache
- pub name: String,
- /// Columns intended to be used as predicates in the cache
- pub key_columns: Vec<String>,
- /// Columns that store values in the cache
- pub value_columns: LastCacheValueColumnsDef,
- /// The number of last values to hold in the cache
- pub count: LastCacheSize,
- /// The time-to-live (TTL) in seconds for entries in the cache
- pub ttl: u64,
-}
-
-impl LastCacheDefinition {
- /// Create a new [`LastCacheDefinition`] with explicit value columns
- pub fn new_with_explicit_value_columns(
- table: impl Into<String>,
- name: impl Into<String>,
- key_columns: impl IntoIterator<Item: Into<String>>,
- value_columns: impl IntoIterator<Item: Into<String>>,
- count: usize,
- ttl: u64,
- ) -> Result<Self, Error> {
- Ok(Self {
- table: table.into(),
- name: name.into(),
- key_columns: key_columns.into_iter().map(Into::into).collect(),
- value_columns: LastCacheValueColumnsDef::Explicit {
- columns: value_columns.into_iter().map(Into::into).collect(),
- },
- count: count.try_into()?,
- ttl,
- })
- }
-
- /// Create a new [`LastCacheDefinition`] with explicit value columns
- pub fn new_all_non_key_value_columns(
- table: impl Into<String>,
- name: impl Into<String>,
- key_columns: impl IntoIterator<Item: Into<String>>,
- count: usize,
- ttl: u64,
- ) -> Result<Self, Error> {
- Ok(Self {
- table: table.into(),
- name: name.into(),
- key_columns: key_columns.into_iter().map(Into::into).collect(),
- value_columns: LastCacheValueColumnsDef::AllNonKeyColumns,
- count: count.try_into()?,
- ttl,
- })
- }
-}
-
-/// A last cache will either store values for an explicit set of columns, or will accept all
-/// non-key columns
-#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
-#[serde(tag = "type", rename_all = "snake_case")]
-pub enum LastCacheValueColumnsDef {
- /// Explicit list of column names
- Explicit { columns: Vec<String> },
- /// Stores all non-key columns
- AllNonKeyColumns,
-}
-
-/// The maximum allowed size for a last cache
-pub const LAST_CACHE_MAX_SIZE: usize = 10;
-
-/// The size of the last cache
-///
-/// Must be between 1 and [`LAST_CACHE_MAX_SIZE`]
-#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)]
-pub struct LastCacheSize(usize);
-
-impl LastCacheSize {
- pub fn new(size: usize) -> Result<Self, Error> {
- if size == 0 || size > LAST_CACHE_MAX_SIZE {
- Err(Error::InvalidLastCacheSize)
- } else {
- Ok(Self(size))
- }
- }
-}
-
-impl TryFrom<usize> for LastCacheSize {
- type Error = Error;
-
- fn try_from(value: usize) -> Result<Self, Self::Error> {
- Self::new(value)
- }
-}
-
-impl From<LastCacheSize> for usize {
- fn from(value: LastCacheSize) -> Self {
- value.0
- }
-}
-
-impl From<LastCacheSize> for u64 {
- fn from(value: LastCacheSize) -> Self {
- value
- .0
- .try_into()
- .expect("usize fits into a 64 bit unsigned integer")
- }
-}
-
-impl PartialEq<usize> for LastCacheSize {
- fn eq(&self, other: &usize) -> bool {
- self.0.eq(other)
- }
-}
-
-impl PartialEq<LastCacheSize> for usize {
- fn eq(&self, other: &LastCacheSize) -> bool {
- self.eq(&other.0)
- }
-}
-
pub fn influx_column_type_from_field_value(fv: &FieldValue<'_>) -> InfluxColumnType {
match fv {
FieldValue::I64(_) => InfluxColumnType::Field(InfluxFieldType::Integer),
diff --git a/influxdb3_catalog/src/serialize.rs b/influxdb3_catalog/src/serialize.rs
index 2e1848703e..635762e4a7 100644
--- a/influxdb3_catalog/src/serialize.rs
+++ b/influxdb3_catalog/src/serialize.rs
@@ -1,10 +1,10 @@
+use crate::catalog::TableDefinition;
use arrow::datatypes::DataType as ArrowDataType;
+use influxdb3_wal::{LastCacheDefinition, LastCacheValueColumnsDef};
use schema::{InfluxColumnType, SchemaBuilder};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
-use crate::catalog::{LastCacheDefinition, LastCacheValueColumnsDef, TableDefinition};
-
impl Serialize for TableDefinition {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index 7524c41059..5b4f45e2f0 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -20,8 +20,9 @@ use hyper::header::CONTENT_TYPE;
use hyper::http::HeaderValue;
use hyper::HeaderMap;
use hyper::{Body, Method, Request, Response, StatusCode};
-use influxdb3_catalog::catalog::{Error as CatalogError, LastCacheDefinition};
+use influxdb3_catalog::catalog::Error as CatalogError;
use influxdb3_process::{INFLUXDB3_GIT_HASH_SHORT, INFLUXDB3_VERSION};
+use influxdb3_wal::LastCacheDefinition;
use influxdb3_write::last_cache;
use influxdb3_write::persister::TrackedMemoryArrowWriter;
use influxdb3_write::write_buffer::Error as WriteBufferError;
diff --git a/influxdb3_server/src/system_tables/last_caches.rs b/influxdb3_server/src/system_tables/last_caches.rs
index 627e4a0607..170dbc6aa1 100644
--- a/influxdb3_server/src/system_tables/last_caches.rs
+++ b/influxdb3_server/src/system_tables/last_caches.rs
@@ -4,7 +4,7 @@ use arrow::array::{GenericListBuilder, StringBuilder};
use arrow_array::{ArrayRef, RecordBatch, StringArray, UInt64Array};
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use datafusion::{error::DataFusionError, logical_expr::Expr};
-use influxdb3_catalog::catalog::{LastCacheDefinition, LastCacheValueColumnsDef};
+use influxdb3_wal::{LastCacheDefinition, LastCacheValueColumnsDef};
use influxdb3_write::last_cache::LastCacheProvider;
use iox_system_tables::IoxSystemTable;
diff --git a/influxdb3_wal/src/lib.rs b/influxdb3_wal/src/lib.rs
index 58627e6c00..2d20474c3d 100644
--- a/influxdb3_wal/src/lib.rs
+++ b/influxdb3_wal/src/lib.rs
@@ -44,6 +44,9 @@ pub enum Error {
#[error("invalid level 0 duration {0}. Must be one of 1m, 5m, 10m")]
InvalidLevel0Duration(String),
+
+ #[error("last cache size must be from 1 to 10")]
+ InvalidLastCacheSize,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -203,6 +206,7 @@ pub enum WalOp {
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct CatalogBatch {
pub database_name: Arc<str>,
+ pub time_ns: i64,
pub ops: Vec<CatalogOp>,
}
@@ -211,6 +215,8 @@ pub enum CatalogOp {
CreateDatabase(DatabaseDefinition),
CreateTable(TableDefinition),
AddFields(FieldAdditions),
+ CreateLastCache(LastCacheDefinition),
+ DeleteLastCache(LastCacheDelete),
}
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
@@ -281,6 +287,135 @@ impl From<FieldDataType> for InfluxColumnType {
}
}
+/// Defines a last cache in a given table and database
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
+pub struct LastCacheDefinition {
+ /// The table name the cache is associated with
+ pub table: String,
+ /// Given name of the cache
+ pub name: String,
+ /// Columns intended to be used as predicates in the cache
+ pub key_columns: Vec<String>,
+ /// Columns that store values in the cache
+ pub value_columns: LastCacheValueColumnsDef,
+ /// The number of last values to hold in the cache
+ pub count: LastCacheSize,
+ /// The time-to-live (TTL) in seconds for entries in the cache
+ pub ttl: u64,
+}
+
+impl LastCacheDefinition {
+ /// Create a new [`LastCacheDefinition`] with explicit value columns
+ pub fn new_with_explicit_value_columns(
+ table: impl Into<String>,
+ name: impl Into<String>,
+ key_columns: impl IntoIterator<Item: Into<String>>,
+ value_columns: impl IntoIterator<Item: Into<String>>,
+ count: usize,
+ ttl: u64,
+ ) -> Result<Self, Error> {
+ Ok(Self {
+ table: table.into(),
+ name: name.into(),
+ key_columns: key_columns.into_iter().map(Into::into).collect(),
+ value_columns: LastCacheValueColumnsDef::Explicit {
+ columns: value_columns.into_iter().map(Into::into).collect(),
+ },
+ count: count.try_into()?,
+ ttl,
+ })
+ }
+
+ /// Create a new [`LastCacheDefinition`] with explicit value columns
+ pub fn new_all_non_key_value_columns(
+ table: impl Into<String>,
+ name: impl Into<String>,
+ key_columns: impl IntoIterator<Item: Into<String>>,
+ count: usize,
+ ttl: u64,
+ ) -> Result<Self, Error> {
+ Ok(Self {
+ table: table.into(),
+ name: name.into(),
+ key_columns: key_columns.into_iter().map(Into::into).collect(),
+ value_columns: LastCacheValueColumnsDef::AllNonKeyColumns,
+ count: count.try_into()?,
+ ttl,
+ })
+ }
+}
+
+/// A last cache will either store values for an explicit set of columns, or will accept all
+/// non-key columns
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum LastCacheValueColumnsDef {
+ /// Explicit list of column names
+ Explicit { columns: Vec<String> },
+ /// Stores all non-key columns
+ AllNonKeyColumns,
+}
+
+/// The maximum allowed size for a last cache
+pub const LAST_CACHE_MAX_SIZE: usize = 10;
+
+/// The size of the last cache
+///
+/// Must be between 1 and [`LAST_CACHE_MAX_SIZE`]
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)]
+pub struct LastCacheSize(usize);
+
+impl LastCacheSize {
+ pub fn new(size: usize) -> Result<Self, Error> {
+ if size == 0 || size > LAST_CACHE_MAX_SIZE {
+ Err(Error::InvalidLastCacheSize)
+ } else {
+ Ok(Self(size))
+ }
+ }
+}
+
+impl TryFrom<usize> for LastCacheSize {
+ type Error = Error;
+
+ fn try_from(value: usize) -> Result<Self, Self::Error> {
+ Self::new(value)
+ }
+}
+
+impl From<LastCacheSize> for usize {
+ fn from(value: LastCacheSize) -> Self {
+ value.0
+ }
+}
+
+impl From<LastCacheSize> for u64 {
+ fn from(value: LastCacheSize) -> Self {
+ value
+ .0
+ .try_into()
+ .expect("usize fits into a 64 bit unsigned integer")
+ }
+}
+
+impl PartialEq<usize> for LastCacheSize {
+ fn eq(&self, other: &usize) -> bool {
+ self.0.eq(other)
+ }
+}
+
+impl PartialEq<LastCacheSize> for usize {
+ fn eq(&self, other: &LastCacheSize) -> bool {
+ self.eq(&other.0)
+ }
+}
+
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
+pub struct LastCacheDelete {
+ pub table: String,
+ pub name: String,
+}
+
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct WriteBatch {
pub database_name: Arc<str>,
diff --git a/influxdb3_wal/src/object_store.rs b/influxdb3_wal/src/object_store.rs
index 4b1805202d..b30c5b010d 100644
--- a/influxdb3_wal/src/object_store.rs
+++ b/influxdb3_wal/src/object_store.rs
@@ -467,7 +467,7 @@ struct WalBuffer {
impl WalBuffer {
fn is_empty(&self) -> bool {
- self.database_to_write_batch.is_empty()
+ self.database_to_write_batch.is_empty() && self.catalog_batches.is_empty()
}
}
@@ -537,6 +537,11 @@ impl WalBuffer {
max_timestamp_ns = max_timestamp_ns.max(write_batch.max_time_ns);
}
+ for catalog_batch in &self.catalog_batches {
+ min_timestamp_ns = min_timestamp_ns.min(catalog_batch.time_ns);
+ max_timestamp_ns = max_timestamp_ns.max(catalog_batch.time_ns);
+ }
+
// have the catalog ops come before any writes in ordering
let mut ops =
Vec::with_capacity(self.database_to_write_batch.len() + self.catalog_batches.len());
diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs
index 95928efc99..776809c4de 100644
--- a/influxdb3_write/src/last_cache/mod.rs
+++ b/influxdb3_write/src/last_cache/mod.rs
@@ -4,6 +4,7 @@ use std::{
time::{Duration, Instant},
};
+use arrow::datatypes::SchemaRef;
use arrow::{
array::{
new_null_array, ArrayRef, BooleanBuilder, Float64Builder, GenericByteDictionaryBuilder,
@@ -22,8 +23,11 @@ use datafusion::{
};
use hashbrown::{HashMap, HashSet};
use indexmap::{IndexMap, IndexSet};
-use influxdb3_catalog::catalog::{LastCacheDefinition, LastCacheSize, LastCacheValueColumnsDef};
-use influxdb3_wal::{Field, FieldData, Row, WalContents, WalOp};
+use influxdb3_catalog::catalog::InnerCatalog;
+use influxdb3_wal::{
+ Field, FieldData, LastCacheDefinition, LastCacheSize, LastCacheValueColumnsDef, Row,
+ WalContents, WalOp,
+};
use iox_time::Time;
use parking_lot::RwLock;
use schema::{InfluxColumnType, InfluxFieldType, Schema, TIME_COLUMN_NAME};
@@ -113,10 +117,7 @@ impl LastCacheProvider {
}
/// Initialize a [`LastCacheProvider`] from a [`InnerCatalog`]
- #[cfg(test)]
- pub(crate) fn new_from_catalog(
- catalog: &influxdb3_catalog::catalog::InnerCatalog,
- ) -> Result<Self, Error> {
+ pub(crate) fn new_from_catalog(catalog: &InnerCatalog) -> Result<Self, Error> {
let provider = LastCacheProvider::new();
for db_schema in catalog.databases() {
for tbl_def in db_schema.tables() {
@@ -244,59 +245,15 @@ impl LastCacheProvider {
format!("{tbl_name}_{keys}_last_cache", keys = key_columns.join("_"))
});
- let (value_columns, accept_new_fields) = if let Some(mut vals) = value_columns {
- // if value columns are specified, check that they are present in the table schema
- for name in vals.iter() {
- if schema.field_by_name(name).is_none() {
- return Err(Error::ValueColumnDoesNotExist {
- column_name: name.into(),
- });
- }
- }
- // double-check that time column is included
- let time_col = TIME_COLUMN_NAME.to_string();
- if !vals.contains(&time_col) {
- vals.push(time_col);
- }
- (vals, false)
- } else {
- // default to all non-key columns
- (
- schema
- .iter()
- .filter_map(|(_, f)| {
- if key_columns.contains(f.name()) {
- None
- } else {
- Some(f.name().to_string())
- }
- })
- .collect::<Vec<String>>(),
- true,
- )
+ let accept_new_fields = value_columns.is_none();
+ let last_cache_value_columns_def = match &value_columns {
+ None => LastCacheValueColumnsDef::AllNonKeyColumns,
+ Some(cols) => LastCacheValueColumnsDef::Explicit {
+ columns: cols.clone(),
+ },
};
- let mut schema_builder = ArrowSchemaBuilder::new();
- // Add key columns first:
- for (t, field) in schema
- .iter()
- .filter(|&(_, f)| key_columns.contains(f.name()))
- {
- if let InfluxColumnType::Tag = t {
- // override tags with string type in the schema, because the KeyValue type stores
- // them as strings, and produces them as StringArray when creating RecordBatches:
- schema_builder.push(ArrowField::new(field.name(), DataType::Utf8, false))
- } else {
- schema_builder.push(field.clone());
- };
- }
- // Add value columns second:
- for (_, field) in schema
- .iter()
- .filter(|&(_, f)| value_columns.contains(f.name()))
- {
- schema_builder.push(field.clone());
- }
+ let cache_schema = self.last_cache_schema_from_schema(&schema, &key_columns, value_columns);
let series_key = schema
.series_key()
@@ -312,7 +269,7 @@ impl LastCacheProvider {
count,
ttl,
key_columns.clone(),
- Arc::new(schema_builder.finish()),
+ cache_schema,
series_key,
accept_new_fields,
);
@@ -341,18 +298,91 @@ impl LastCacheProvider {
table: tbl_name,
name: cache_name,
key_columns,
- value_columns: if accept_new_fields {
- LastCacheValueColumnsDef::AllNonKeyColumns
- } else {
- LastCacheValueColumnsDef::Explicit {
- columns: value_columns,
- }
- },
+ value_columns: last_cache_value_columns_def,
count,
ttl: ttl.as_secs(),
}))
}
+ fn last_cache_schema_from_schema(
+ &self,
+ schema: &Schema,
+ key_columns: &[String],
+ value_columns: Option<Vec<String>>,
+ ) -> SchemaRef {
+ let mut schema_builder = ArrowSchemaBuilder::new();
+ // Add key columns first:
+ for (t, field) in schema
+ .iter()
+ .filter(|&(_, f)| key_columns.contains(f.name()))
+ {
+ if let InfluxColumnType::Tag = t {
+ // override tags with string type in the schema, because the KeyValue type stores
+ // them as strings, and produces them as StringArray when creating RecordBatches:
+ schema_builder.push(ArrowField::new(field.name(), DataType::Utf8, false))
+ } else {
+ schema_builder.push(field.clone());
+ };
+ }
+ // Add value columns second:
+ match value_columns {
+ Some(cols) => {
+ for (_, field) in schema
+ .iter()
+ .filter(|&(_, f)| cols.contains(f.name()) || f.name() == TIME_COLUMN_NAME)
+ {
+ schema_builder.push(field.clone());
+ }
+ }
+ None => {
+ for (_, field) in schema
+ .iter()
+ .filter(|&(_, f)| !key_columns.contains(f.name()))
+ {
+ schema_builder.push(field.clone());
+ }
+ }
+ }
+
+ Arc::new(schema_builder.finish())
+ }
+
+ pub fn create_cache_from_definition(
+ &self,
+ db_name: &str,
+ schema: &Schema,
+ definition: &LastCacheDefinition,
+ ) {
+ let value_columns = match &definition.value_columns {
+ LastCacheValueColumnsDef::AllNonKeyColumns => None,
+ LastCacheValueColumnsDef::Explicit { columns } => Some(columns.clone()),
+ };
+ let accept_new_fields = value_columns.is_none();
+ let series_key = schema
+ .series_key()
+ .map(|keys| keys.into_iter().map(|s| s.to_string()).collect());
+
+ let schema =
+ self.last_cache_schema_from_schema(schema, &definition.key_columns, value_columns);
+
+ let last_cache = LastCache::new(
+ definition.count,
+ Duration::from_secs(definition.ttl),
+ definition.key_columns.clone(),
+ schema,
+ series_key,
+ accept_new_fields,
+ );
+
+ let mut lock = self.cache_map.write();
+
+ lock.entry(db_name.to_string())
+ .or_default()
+ .entry_ref(&definition.table)
+ .or_default()
+ .insert(definition.name.clone(), last_cache);
+ }
+
/// Delete a cache from the provider
///
/// This will also clean up empty levels in the provider hierarchy, so if there are no more
@@ -1545,10 +1575,8 @@ mod tests {
use ::object_store::{memory::InMemory, ObjectStore};
use arrow_util::{assert_batches_eq, assert_batches_sorted_eq};
use data_types::NamespaceName;
- use influxdb3_catalog::catalog::{
- Catalog, DatabaseSchema, LastCacheDefinition, TableDefinition,
- };
- use influxdb3_wal::WalConfig;
+ use influxdb3_catalog::catalog::{Catalog, DatabaseSchema, TableDefinition};
+ use influxdb3_wal::{LastCacheDefinition, WalConfig};
use insta::assert_json_snapshot;
use iox_time::{MockProvider, Time};
diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs
index dc11d75acb..af2a51f4a1 100644
--- a/influxdb3_write/src/lib.rs
+++ b/influxdb3_write/src/lib.rs
@@ -21,8 +21,7 @@ use datafusion::execution::context::SessionState;
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion::prelude::Expr;
use influxdb3_catalog::catalog;
-use influxdb3_catalog::catalog::LastCacheDefinition;
-use influxdb3_wal::WalFileSequenceNumber;
+use influxdb3_wal::{LastCacheDefinition, WalFileSequenceNumber};
use iox_query::QueryChunk;
use iox_time::Time;
use last_cache::LastCacheProvider;
@@ -364,7 +363,7 @@ mod test_helpers {
lp: &str,
) -> WriteBatch {
let db_name = NamespaceName::new(db_name).unwrap();
- let result = WriteValidator::initialize(db_name.clone(), catalog)
+ let result = WriteValidator::initialize(db_name.clone(), catalog, 0)
.unwrap()
.v1_parse_lines_and_update_schema(lp, false)
.unwrap()
diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs
index 8a2ef47619..df19481a52 100644
--- a/influxdb3_write/src/write_buffer/mod.rs
+++ b/influxdb3_write/src/write_buffer/mod.rs
@@ -23,9 +23,13 @@ use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::execution::context::SessionState;
use datafusion::logical_expr::Expr;
use datafusion::physical_plan::SendableRecordBatchStream;
-use influxdb3_catalog::catalog::{Catalog, LastCacheDefinition};
+use influxdb3_catalog::catalog::Catalog;
use influxdb3_wal::object_store::WalObjectStore;
-use influxdb3_wal::{Wal, WalConfig, WalFileNotifier, WalOp};
+use influxdb3_wal::CatalogOp::CreateLastCache;
+use influxdb3_wal::{
+ CatalogBatch, CatalogOp, LastCacheDefinition, LastCacheDelete, Wal, WalConfig, WalFileNotifier,
+ WalOp,
+};
use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges};
use iox_query::QueryChunk;
use iox_time::{Time, TimeProvider};
@@ -114,8 +118,6 @@ impl<T: TimeProvider> WriteBufferImpl<T> {
executor: Arc<iox_query::exec::Executor>,
wal_config: WalConfig,
) -> Result<Self> {
- let last_cache = Arc::new(LastCacheProvider::new());
-
// load up the catalog, the snapshots, and replay the wal into the in memory buffer
let catalog = persister.load_catalog().await?;
let catalog = Arc::new(
@@ -123,6 +125,9 @@ impl<T: TimeProvider> WriteBufferImpl<T> {
.map(|c| Catalog::from_inner(c.catalog))
.unwrap_or_else(Catalog::new),
);
+
+ let last_cache = Arc::new(LastCacheProvider::new_from_catalog(&catalog.clone_inner())?);
+
let persisted_snapshots = persister.load_snapshots(1000).await?;
let last_snapshot_wal_sequence = persisted_snapshots
.first()
@@ -182,9 +187,13 @@ impl<T: TimeProvider> WriteBufferImpl<T> {
// validated lines will update the in-memory catalog, ensuring that all write operations
// past this point will be infallible
- let result = WriteValidator::initialize(db_name.clone(), self.catalog())?
- .v1_parse_lines_and_update_schema(lp, accept_partial)?
- .convert_lines_to_buffer(ingest_time, self.wal_config.level_0_duration, precision);
+ let result = WriteValidator::initialize(
+ db_name.clone(),
+ self.catalog(),
+ ingest_time.timestamp_nanos(),
+ )?
+ .v1_parse_lines_and_update_schema(lp, accept_partial)?
+ .convert_lines_to_buffer(ingest_time, self.wal_config.level_0_duration, precision);
// if there were catalog updates, ensure they get persisted to the wal, so they're
// replayed on restart
@@ -220,9 +229,13 @@ impl<T: TimeProvider> WriteBufferImpl<T> {
) -> Result<BufferedWriteRequest> {
// validated lines will update the in-memory catalog, ensuring that all write operations
// past this point will be infallible
- let result = WriteValidator::initialize(db_name.clone(), self.catalog())?
- .v3_parse_lines_and_update_schema(lp, accept_partial)?
- .convert_lines_to_buffer(ingest_time, self.wal_config.level_0_duration, precision);
+ let result = WriteValidator::initialize(
+ db_name.clone(),
+ self.catalog(),
+ ingest_time.timestamp_nanos(),
+ )?
+ .v3_parse_lines_and_update_schema(lp, accept_partial)?
+ .convert_lines_to_buffer(ingest_time, self.wal_config.level_0_duration, precision);
// if there were catalog updates, ensure they get persisted to the wal, so they're
// replayed on restart
@@ -510,6 +523,7 @@ impl<T: TimeProvider> LastCacheManager for WriteBufferImpl<T> {
.ok_or(Error::TableDoesNotExist)?
.schema()
.clone();
+
if let Some(info) = self.last_cache.create_cache(CreateCacheArguments {
db_name: db_name.to_string(),
tbl_name: tbl_name.to_string(),
@@ -520,14 +534,14 @@ impl<T: TimeProvider> LastCacheManager for WriteBufferImpl<T> {
key_columns,
value_columns,
})? {
- let last_wal_file_number = self.wal.last_sequence_number().await;
self.catalog.add_last_cache(db_name, tbl_name, info.clone());
+ let add_cache_catalog_batch = WalOp::Catalog(CatalogBatch {
+ time_ns: self.time_provider.now().timestamp_nanos(),
+ database_name: Arc::clone(&db_schema.name),
+ ops: vec![CreateLastCache(info.clone())],
+ });
+ self.wal.write_ops(vec![add_cache_catalog_batch]).await?;
- let inner_catalog = catalog.clone_inner();
- // Force persistence to the catalog, since we aren't going through the WAL:
- self.persister
- .persist_catalog(last_wal_file_number, Catalog::from_inner(inner_catalog))
- .await?;
Ok(Some(info))
} else {
Ok(None)
@@ -545,14 +559,19 @@ impl<T: TimeProvider> LastCacheManager for WriteBufferImpl<T> {
.delete_cache(db_name, tbl_name, cache_name)?;
catalog.delete_last_cache(db_name, tbl_name, cache_name);
- let last_wal_file_number = self.wal.last_sequence_number().await;
// NOTE: if this fails then the cache will be gone from the running server, but will be
// resurrected on server restart.
- let inner_catalog = catalog.clone_inner();
- // Force persistence to the catalog, since we aren't going through the WAL:
- self.persister
- .persist_catalog(last_wal_file_number, Catalog::from_inner(inner_catalog))
+ self.wal
+ .write_ops(vec![WalOp::Catalog(CatalogBatch {
+ time_ns: self.time_provider.now().timestamp_nanos(),
+ database_name: db_name.into(),
+ ops: vec![CatalogOp::DeleteLastCache(LastCacheDelete {
+ table: tbl_name.into(),
+ name: cache_name.into(),
+ })],
+ })])
.await?;
+
Ok(())
}
}
@@ -562,13 +581,11 @@ impl<T: TimeProvider> WriteBuffer for WriteBufferImpl<T> {}
#[cfg(test)]
mod tests {
use super::*;
- use crate::paths::CatalogFilePath;
use crate::persister::PersisterImpl;
use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_eq;
use datafusion::assert_batches_sorted_eq;
use datafusion_util::config::register_iox_object_store;
- use futures_util::StreamExt;
use influxdb3_wal::Level0Duration;
use iox_query::exec::IOxSessionContext;
use iox_time::{MockProvider, Time};
@@ -580,7 +597,7 @@ mod tests {
let catalog = Arc::new(Catalog::new());
let db_name = NamespaceName::new("foo").unwrap();
let lp = "cpu,region=west user=23.2 100\nfoo f1=1i";
- WriteValidator::initialize(db_name, Arc::clone(&catalog))
+ WriteValidator::initialize(db_name, Arc::clone(&catalog), 0)
.unwrap()
.v1_parse_lines_and_update_schema(lp, false)
.unwrap()
@@ -690,7 +707,7 @@ mod tests {
}
#[tokio::test]
- async fn persists_catalog_on_last_cache_create_and_delete() {
+ async fn last_cache_create_and_delete_is_durable() {
let (wbuf, _ctx) = setup(
Time::from_timestamp_nanos(0),
WalConfig {
@@ -708,7 +725,7 @@ mod tests {
wbuf.write_lp(
NamespaceName::new(db_name).unwrap(),
format!("{tbl_name},t1=a f1=true").as_str(),
- Time::from_timestamp(30, 0).unwrap(),
+ Time::from_timestamp(20, 0).unwrap(),
false,
Precision::Nanosecond,
)
@@ -718,19 +735,30 @@ mod tests {
wbuf.create_last_cache(db_name, tbl_name, Some(cache_name), None, None, None, None)
.await
.unwrap();
- // Check that the catalog was persisted, without advancing time:
- let object_store = wbuf.persister.object_store();
- let catalog_json = fetch_catalog_as_json(
- Arc::clone(&object_store),
- wbuf.persister.host_identifier_prefix(),
+
+ // load a new write buffer to ensure its durable
+ let wbuf = WriteBufferImpl::new(
+ Arc::clone(&wbuf.persister),
+ Arc::clone(&wbuf.time_provider),
+ Arc::clone(&wbuf.buffer.executor),
+ WalConfig {
+ level_0_duration: Level0Duration::new_1m(),
+ max_write_buffer_size: 100,
+ flush_interval: Duration::from_millis(10),
+ snapshot_size: 1,
+ },
)
- .await;
+ .await
+ .unwrap();
+
+ let catalog_json = catalog_to_json(&wbuf.catalog);
insta::assert_json_snapshot!("catalog-immediately-after-last-cache-create", catalog_json);
+
// Do another write that will update the state of the catalog, specifically, the table
// that the last cache was created for, and add a new field to the table/cache `f2`:
wbuf.write_lp(
NamespaceName::new(db_name).unwrap(),
- format!("{tbl_name},t1=a f1=true,f2=42i").as_str(),
+ format!("{tbl_name},t1=a f1=false,f2=42i").as_str(),
Time::from_timestamp(30, 0).unwrap(),
false,
Precision::Nanosecond,
@@ -738,39 +766,44 @@ mod tests {
.await
.unwrap();
- // do another write, which will force a snapshot of the WAL and thus the persistence of
- // the catalog
+ // and do another replay and verification
+ let wbuf = WriteBufferImpl::new(
+ Arc::clone(&wbuf.persister),
+ Arc::clone(&wbuf.time_provider),
+ Arc::clone(&wbuf.buffer.executor),
+ WalConfig {
+ level_0_duration: Level0Duration::new_1m(),
+ max_write_buffer_size: 100,
+ flush_interval: Duration::from_millis(10),
+ snapshot_size: 1,
+ },
+ )
+ .await
+ .unwrap();
+
+ let catalog_json = catalog_to_json(&wbuf.catalog);
+ insta::assert_json_snapshot!(
+ "catalog-after-last-cache-create-and-new-field",
+ catalog_json
+ );
+
+ // write a new data point to fill the cache
wbuf.write_lp(
NamespaceName::new(db_name).unwrap(),
- format!("{tbl_name},t1=b f1=false").as_str(),
+ format!("{tbl_name},t1=a f1=true,f2=53i").as_str(),
Time::from_timestamp(40, 0).unwrap(),
false,
Precision::Nanosecond,
)
.await
.unwrap();
- // Check the catalog again, to make sure it still has the last cache with the correct
- // configuration:
- let catalog_json = fetch_catalog_as_json(
- Arc::clone(&object_store),
- wbuf.persister.host_identifier_prefix(),
- )
- .await;
- // NOTE: the asserted snapshot is correct in-so-far as the catalog contains the last cache
- // configuration; however, it is not correct w.r.t. the fields. The second write adds a new
- // field `f2` to the last cache (which you can see in the query below), but the persisted
- // catalog does not have `f2` in the value columns. This will need to be fixed, see
- // https://github.com/influxdata/influxdb/issues/25171
- insta::assert_json_snapshot!(
- "catalog-after-allowing-time-to-persist-segments-after-create",
- catalog_json
- );
+
// Fetch record batches from the last cache directly:
let expected = [
"+----+------+----------------------+----+",
"| t1 | f1 | time | f2 |",
"+----+------+----------------------+----+",
- "| a | true | 1970-01-01T00:00:30Z | 42 |",
+ "| a | true | 1970-01-01T00:00:40Z | 53 |",
"+----+------+----------------------+----+",
];
let actual = wbuf
@@ -783,47 +816,23 @@ mod tests {
wbuf.delete_last_cache(db_name, tbl_name, cache_name)
.await
.unwrap();
- // Catalog should be persisted, and no longer have the last cache, without advancing time:
- let catalog_json = fetch_catalog_as_json(
- Arc::clone(&object_store),
- wbuf.persister.host_identifier_prefix(),
- )
- .await;
- insta::assert_json_snapshot!("catalog-immediately-after-last-cache-delete", catalog_json);
- // Do another write so there is data to be persisted in the buffer:
- wbuf.write_lp(
- NamespaceName::new(db_name).unwrap(),
- format!("{tbl_name},t1=b f1=false,f2=1337i").as_str(),
- Time::from_timestamp(830, 0).unwrap(),
- false,
- Precision::Nanosecond,
+
+ // do another reload and verify it's gone
+ let wbuf = WriteBufferImpl::new(
+ Arc::clone(&wbuf.persister),
+ Arc::clone(&wbuf.time_provider),
+ Arc::clone(&wbuf.buffer.executor),
+ WalConfig {
+ level_0_duration: Level0Duration::new_1m(),
+ max_write_buffer_size: 100,
+ flush_interval: Duration::from_millis(10),
+ snapshot_size: 1,
+ },
)
.await
.unwrap();
- // Advance time to allow for persistence of segment data:
- wbuf.time_provider
- .set(Time::from_timestamp(1600, 0).unwrap());
- let mut count = 0;
- loop {
- count += 1;
- tokio::time::sleep(Duration::from_millis(10)).await;
- let files = wbuf.persisted_files.get_files(db_name, tbl_name);
- if !files.is_empty() {
- break;
- } else if count > 9 {
- panic!("not persisting");
- }
- }
- // Check the catalog again, to ensure the last cache is still gone:
- let catalog_json = fetch_catalog_as_json(
- Arc::clone(&object_store),
- wbuf.persister.host_identifier_prefix(),
- )
- .await;
- insta::assert_json_snapshot!(
- "catalog-after-allowing-time-to-persist-segments-after-delete",
- catalog_json
- );
+ let catalog_json = catalog_to_json(&wbuf.catalog);
+ insta::assert_json_snapshot!("catalog-immediately-after-last-cache-delete", catalog_json);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -1016,23 +1025,9 @@ mod tests {
assert_batches_sorted_eq!(&expected, &actual);
}
- async fn fetch_catalog_as_json(
- object_store: Arc<dyn ObjectStore>,
- host_identifier_prefix: &str,
- ) -> serde_json::Value {
- let mut list = object_store.list(Some(&CatalogFilePath::dir(host_identifier_prefix)));
- let Some(item) = list.next().await else {
- panic!("there should have been a catalog file persisted");
- };
- let item = item.expect("item from object store");
- let obj = object_store.get(&item.location).await.expect("get catalog");
- serde_json::from_slice::<serde_json::Value>(
- obj.bytes()
- .await
- .expect("get bytes from GetResult")
- .as_ref(),
- )
- .expect("parse bytes as JSON")
+ fn catalog_to_json(catalog: &Catalog) -> serde_json::Value {
+ let bytes = serde_json::to_vec_pretty(catalog).unwrap();
+ serde_json::from_slice::<serde_json::Value>(&bytes).expect("parse bytes as JSON")
}
async fn setup(
diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs
index 87b960a1e9..bf4dd5e666 100644
--- a/influxdb3_write/src/write_buffer/queryable_buffer.rs
+++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs
@@ -16,7 +16,7 @@ use datafusion::logical_expr::Expr;
use datafusion_util::stream_from_batches;
use hashbrown::HashMap;
use influxdb3_catalog::catalog::{Catalog, DatabaseSchema};
-use influxdb3_wal::{SnapshotDetails, WalContents, WalFileNotifier, WalOp, WriteBatch};
+use influxdb3_wal::{CatalogOp, SnapshotDetails, WalContents, WalFileNotifier, WalOp, WriteBatch};
use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges};
use iox_query::exec::Executor;
use iox_query::frontend::reorg::ReorgPlanner;
@@ -131,16 +131,7 @@ impl QueryableBuffer {
let mut buffer = self.buffer.write();
self.last_cache_provider.evict_expired_cache_entries();
self.last_cache_provider.write_wal_contents_to_cache(&write);
-
- for op in write.ops {
- match op {
- WalOp::Write(write_batch) => buffer.add_write_batch(write_batch),
- WalOp::Catalog(catalog_batch) => buffer
- .catalog
- .apply_catalog_batch(&catalog_batch)
- .expect("catalog batch should apply"),
- }
- }
+ buffer.buffer_ops(write.ops, &self.last_cache_provider);
}
/// Called when the wal has written a new file and is attempting to snapshot. Kicks off persistence of
@@ -186,15 +177,7 @@ impl QueryableBuffer {
// we must buffer the ops after the snapshotting as this data should not be persisted
// with this set of wal files
- for op in write.ops {
- match op {
- WalOp::Write(write_batch) => buffer.add_write_batch(write_batch),
- WalOp::Catalog(catalog_batch) => buffer
- .catalog
- .apply_catalog_batch(&catalog_batch)
- .expect("catalog batch should apply"),
- }
- }
+ buffer.buffer_ops(write.ops, &self.last_cache_provider);
persisting_chunks
};
@@ -328,6 +311,50 @@ impl BufferState {
}
}
+ fn buffer_ops(&mut self, ops: Vec<WalOp>, last_cache_provider: &LastCacheProvider) {
+ for op in ops {
+ match op {
+ WalOp::Write(write_batch) => self.add_write_batch(write_batch),
+ WalOp::Catalog(catalog_batch) => {
+ self.catalog
+ .apply_catalog_batch(&catalog_batch)
+ .expect("catalog batch should apply");
+
+ let db_schema = self
+ .catalog
+ .db_schema(&catalog_batch.database_name)
+ .expect("database should exist");
+
+ for op in catalog_batch.ops {
+ match op {
+ CatalogOp::CreateLastCache(definition) => {
+ let table_schema = db_schema
+ .get_table_schema(&definition.table)
+ .expect("table should exist");
+ last_cache_provider.create_cache_from_definition(
+ db_schema.name.as_ref(),
+ table_schema,
+ &definition,
+ );
+ }
+ CatalogOp::DeleteLastCache(cache) => {
+ // we can ignore it if this doesn't exist for any reason
+ let _ = last_cache_provider.delete_cache(
+ db_schema.name.as_ref(),
+ &cache.table,
+ &cache.name,
+ );
+ }
+ CatalogOp::AddFields(_) => (),
+ CatalogOp::CreateTable(_) => (),
+ CatalogOp::CreateDatabase(_) => (),
+ }
+ }
+ }
+ }
+ }
+ }
+
fn add_write_batch(&mut self, write_batch: WriteBatch) {
let db_schema = self
.catalog
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-allowing-time-to-persist-segments-after-delete.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-allowing-time-to-persist-segments-after-delete.snap
deleted file mode 100644
index 5c2ecd0544..0000000000
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-allowing-time-to-persist-segments-after-delete.snap
+++ /dev/null
@@ -1,49 +0,0 @@
----
-source: influxdb3_write/src/write_buffer/mod.rs
-expression: catalog_json
----
-{
- "databases": {
- "db": {
- "name": "db",
- "tables": {
- "table": {
- "cols": {
- "f1": {
- "influx_type": "field",
- "nullable": true,
- "type": "bool"
- },
- "f2": {
- "influx_type": "field",
- "nullable": true,
- "type": "i64"
- },
- "t1": {
- "influx_type": "tag",
- "nullable": true,
- "type": {
- "dict": [
- "i32",
- "str"
- ]
- }
- },
- "time": {
- "influx_type": "time",
- "nullable": false,
- "type": {
- "time": [
- "ns",
- null
- ]
- }
- }
- },
- "name": "table"
- }
- }
- }
- },
- "sequence": 7
-}
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-allowing-time-to-persist-segments-after-create.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap
similarity index 97%
rename from influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-allowing-time-to-persist-segments-after-create.snap
rename to influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap
index 072943b888..1306dfe17e 100644
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-allowing-time-to-persist-segments-after-create.snap
+++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap
@@ -1,5 +1,6 @@
---
source: influxdb3_write/src/write_buffer/mod.rs
+assertion_line: 774
expression: catalog_json
---
{
@@ -57,5 +58,5 @@ expression: catalog_json
}
}
},
- "sequence": 6
+ "sequence": 7
}
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap
index c0f5f47c0c..da4b9204c9 100644
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap
+++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap
@@ -52,5 +52,5 @@ expression: catalog_json
}
}
},
- "sequence": 4
+ "sequence": 2
}
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap
index 5c2ecd0544..f44aec3b72 100644
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap
+++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap
@@ -45,5 +45,5 @@ expression: catalog_json
}
}
},
- "sequence": 7
+ "sequence": 8
}
diff --git a/influxdb3_write/src/write_buffer/validator.rs b/influxdb3_write/src/write_buffer/validator.rs
index e036978cb8..1a504948cf 100644
--- a/influxdb3_write/src/write_buffer/validator.rs
+++ b/influxdb3_write/src/write_buffer/validator.rs
@@ -22,6 +22,7 @@ use super::Error;
pub(crate) struct WithCatalog {
catalog: Arc<Catalog>,
db_schema: Arc<DatabaseSchema>,
+ time_now_ns: i64,
}
/// Type state for the [`WriteValidator`] after it has parsed v1 or v3
@@ -45,10 +46,15 @@ impl WriteValidator<WithCatalog> {
pub(crate) fn initialize(
db_name: NamespaceName<'static>,
catalog: Arc<Catalog>,
+ time_now_ns: i64,
) -> Result<WriteValidator<WithCatalog>> {
let db_schema = catalog.db_or_create(db_name.as_str())?;
Ok(WriteValidator {
- state: WithCatalog { catalog, db_schema },
+ state: WithCatalog {
+ catalog,
+ db_schema,
+ time_now_ns,
+ },
})
}
@@ -105,6 +111,7 @@ impl WriteValidator<WithCatalog> {
} else {
let catalog_batch = CatalogBatch {
database_name: Arc::clone(&self.state.db_schema.name),
+ time_ns: self.state.time_now_ns,
ops: catalog_updates,
};
self.state.catalog.apply_catalog_batch(&catalog_batch)?;
@@ -178,6 +185,7 @@ impl WriteValidator<WithCatalog> {
None
} else {
let catalog_batch = CatalogBatch {
+ time_ns: self.state.time_now_ns,
database_name: Arc::clone(&self.state.db_schema.name),
ops: catalog_updates,
};
@@ -775,7 +783,7 @@ mod tests {
fn write_validator_v1() -> Result<(), Error> {
let namespace = NamespaceName::new("test").unwrap();
let catalog = Arc::new(Catalog::new());
- let result = WriteValidator::initialize(namespace.clone(), catalog)?
+ let result = WriteValidator::initialize(namespace.clone(), catalog, 0)?
.v1_parse_lines_and_update_schema("cpu,tag1=foo val1=\"bar\" 1234", false)?
.convert_lines_to_buffer(
Time::from_timestamp_nanos(0),
|
30b292f3df54ed596ad970ea73bd706a5648d5a9
|
Fraser Savage
|
2023-03-30 15:33:33
|
Update namespace service protection limits
|
This commit adds a client method to invoke the
UpdateNamespaceServiceProtectionLimits RPC API, providing a user-friendly
way to do this through the IOx command line.
| null |
feat(cli): Update namespace service protection limits
This commit adds a client method to invoke the
UpdateNamespaceServiceProtectionLimits RPC API, providing a user-friendly
way to do this through the IOx command line.
|
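A minimal sketch of calling the client method this commit adds, with the API as shown in the diff below; the namespace name and limit value are illustrative.

```rust
// Sketch: raise the max-tables service protection limit for a namespace via the new
// client method. Namespace name and limit value are illustrative.
use influxdb_iox_client::connection::Connection;
use influxdb_iox_client::namespace::generated_types::LimitUpdate;
use influxdb_iox_client::namespace::Client;

async fn raise_table_limit(
    connection: Connection,
) -> Result<(), influxdb_iox_client::error::Error> {
    let mut client = Client::new(connection);
    let namespace = client
        .update_namespace_service_protection_limit(
            "service_limiter_namespace",
            LimitUpdate::MaxTables(1337),
        )
        .await?;
    // The returned Namespace reflects the updated limits (generated prost type, Debug-printable).
    println!("{namespace:?}");
    Ok(())
}
```

On the command line this corresponds to `influxdb_iox namespace update-limit --max-tables 1337 <namespace>`, as exercised by the end-to-end test in the diff.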
diff --git a/influxdb_iox/src/commands/namespace/mod.rs b/influxdb_iox/src/commands/namespace/mod.rs
index c823847d7f..9edb414fbb 100644
--- a/influxdb_iox/src/commands/namespace/mod.rs
+++ b/influxdb_iox/src/commands/namespace/mod.rs
@@ -6,6 +6,7 @@ use thiserror::Error;
mod create;
mod delete;
mod retention;
+mod update_limit;
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Error)]
@@ -15,8 +16,13 @@ pub enum Error {
#[error("Client error: {0}")]
ClientError(#[from] influxdb_iox_client::error::Error),
+
+ #[error("No valid limit was provided")]
+ InvalidLimit,
}
+pub type Result<T, E = Error> = std::result::Result<T, E>;
+
/// Various commands for namespace inspection
#[derive(Debug, clap::Parser)]
pub struct Config {
@@ -36,11 +42,14 @@ enum Command {
/// Update retention of an existing namespace
Retention(retention::Config),
+ /// Update one of the service protection limits for an existing namespace
+ UpdateLimit(update_limit::Config),
+
/// Delete a namespace
Delete(delete::Config),
}
-pub async fn command(connection: Connection, config: Config) -> Result<(), Error> {
+pub async fn command(connection: Connection, config: Config) -> Result<()> {
match config.command {
Command::Create(config) => {
create::command(connection, config).await?;
@@ -53,6 +62,9 @@ pub async fn command(connection: Connection, config: Config) -> Result<(), Error
Command::Retention(config) => {
retention::command(connection, config).await?;
}
+ Command::UpdateLimit(config) => {
+ update_limit::command(connection, config).await?;
+ }
Command::Delete(config) => {
delete::command(connection, config).await?;
} // Deliberately not adding _ => so the compiler will direct people here to impl new
diff --git a/influxdb_iox/src/commands/namespace/update_limit.rs b/influxdb_iox/src/commands/namespace/update_limit.rs
new file mode 100644
index 0000000000..2aefa70305
--- /dev/null
+++ b/influxdb_iox/src/commands/namespace/update_limit.rs
@@ -0,0 +1,67 @@
+use influxdb_iox_client::connection::Connection;
+use influxdb_iox_client::namespace::generated_types::LimitUpdate;
+
+use crate::commands::namespace::{Error, Result};
+
+#[derive(Debug, clap::Parser)]
+pub struct Config {
+ /// The namespace to update a service protection limit for
+ #[clap(action)]
+ namespace: String,
+
+ #[command(flatten)]
+ args: Args,
+}
+
+#[derive(Debug, clap::Args)]
+#[clap(group(
+ clap::ArgGroup::new("limit")
+ .required(true)
+ .args(&["max_tables", "max_columns_per_table"])
+ ))]
+struct Args {
+ /// The maximum number of tables to allow for this namespace
+ #[clap(action, long = "max-tables", short = 't', group = "limit")]
+ max_tables: Option<i32>,
+
+ /// The maximum number of columns to allow per table for this namespace
+ #[clap(action, long = "max-columns-per-table", short = 'c', group = "limit")]
+ max_columns_per_table: Option<i32>,
+}
+
+impl TryFrom<Args> for LimitUpdate {
+ type Error = Error;
+ fn try_from(args: Args) -> Result<Self> {
+ let Args {
+ max_tables,
+ max_columns_per_table,
+ } = args;
+
+ if let Some(n) = max_tables {
+ return Ok(Self::MaxTables(n));
+ }
+ if let Some(n) = max_columns_per_table {
+ return Ok(Self::MaxColumnsPerTable(n));
+ }
+
+ Err(Error::InvalidLimit)
+ }
+}
+
+pub async fn command(connection: Connection, config: Config) -> Result<()> {
+ let mut client = influxdb_iox_client::namespace::Client::new(connection);
+
+ let namespace = client
+ .update_namespace_service_protection_limit(
+ &config.namespace,
+ LimitUpdate::try_from(config.args)?,
+ )
+ .await?;
+ println!("{}", serde_json::to_string_pretty(&namespace)?);
+
+ println!(
+ r"
+NOTE: This change will NOT take effect until all router instances have been restarted!"
+ );
+ Ok(())
+}
diff --git a/influxdb_iox/tests/end_to_end_cases/cli.rs b/influxdb_iox/tests/end_to_end_cases/cli.rs
index d9996dae03..71173d109a 100644
--- a/influxdb_iox/tests/end_to_end_cases/cli.rs
+++ b/influxdb_iox/tests/end_to_end_cases/cli.rs
@@ -1040,3 +1040,113 @@ async fn query_ingester() {
.run()
.await
}
+
+/// Test the namespace update service limit command
+#[tokio::test]
+async fn namespace_update_service_limit() {
+ test_helpers::maybe_start_logging();
+ let database_url = maybe_skip_integration!();
+ let mut cluster = MiniCluster::create_shared2(database_url).await;
+
+ StepTest::new(
+ &mut cluster,
+ vec![
+ Step::Custom(Box::new(|state: &mut StepTestState| {
+ async {
+ let namespace = "service_limiter_namespace";
+ let addr = state.cluster().router().router_grpc_base().to_string();
+
+ // {
+ // "id": <foo>,
+ // "name": "service_limiter_namespace",
+ // "serviceProtectionLimits": {
+ // "maxTables": 500,
+ // "maxColumnsPerTable": 200
+ // }
+ // }
+ Command::cargo_bin("influxdb_iox")
+ .unwrap()
+ .arg("-h")
+ .arg(&addr)
+ .arg("namespace")
+ .arg("create")
+ .arg(namespace)
+ .assert()
+ .success()
+ .stdout(
+ predicate::str::contains(namespace)
+ .and(predicate::str::contains(r#""maxTables": 500"#))
+ .and(predicate::str::contains(r#""maxColumnsPerTable": 200"#)),
+ );
+ }
+ .boxed()
+ })),
+ Step::Custom(Box::new(|state: &mut StepTestState| {
+ async {
+ let namespace = "service_limiter_namespace";
+ let addr = state.cluster().router().router_grpc_base().to_string();
+
+ // {
+ // "id": <foo>,
+ // "name": "service_limiter_namespace",
+ // "serviceProtectionLimits": {
+ // "maxTables": 1337,
+ // "maxColumnsPerTable": 200
+ // }
+ // }
+ Command::cargo_bin("influxdb_iox")
+ .unwrap()
+ .arg("-h")
+ .arg(&addr)
+ .arg("namespace")
+ .arg("update-limit")
+ .arg("--max-tables")
+ .arg("1337")
+ .arg(namespace)
+ .assert()
+ .success()
+ .stdout(
+ predicate::str::contains(namespace)
+ .and(predicate::str::contains(r#""maxTables": 1337"#))
+ .and(predicate::str::contains(r#""maxColumnsPerTable": 200"#)),
+ );
+ }
+ .boxed()
+ })),
+ Step::Custom(Box::new(|state: &mut StepTestState| {
+ async {
+ let namespace = "service_limiter_namespace";
+ let addr = state.cluster().router().router_grpc_base().to_string();
+
+ // {
+ // "id": <foo>,
+ // "name": "service_limiter_namespace",
+ // "serviceProtectionLimits": {
+ // "maxTables": 1337,
+ // "maxColumnsPerTable": 42
+ // }
+ // }
+ Command::cargo_bin("influxdb_iox")
+ .unwrap()
+ .arg("-h")
+ .arg(&addr)
+ .arg("namespace")
+ .arg("update-limit")
+ .arg("--max-columns-per-table")
+ .arg("42")
+ .arg(namespace)
+ .assert()
+ .success()
+ .stdout(
+ predicate::str::contains(namespace)
+ .and(predicate::str::contains(r#""maxTables": 1337"#))
+ .and(predicate::str::contains(r#""maxColumnsPerTable": 42"#)),
+ );
+ }
+ .boxed()
+ })),
+ ],
+ )
+ .run()
+ .await
+}
diff --git a/influxdb_iox_client/src/client/namespace.rs b/influxdb_iox_client/src/client/namespace.rs
index c94ea8389c..fb88dca097 100644
--- a/influxdb_iox_client/src/client/namespace.rs
+++ b/influxdb_iox_client/src/client/namespace.rs
@@ -7,7 +7,9 @@ use ::generated_types::google::OptionalField;
/// Re-export generated_types
pub mod generated_types {
- pub use generated_types::influxdata::iox::namespace::v1::*;
+ pub use generated_types::influxdata::iox::namespace::v1::{
+ update_namespace_service_protection_limit_request::LimitUpdate, *,
+ };
}
/// A basic client for working with Namespaces.
@@ -77,6 +79,30 @@ impl Client {
Ok(response.into_inner().namespace.unwrap_field("namespace")?)
}
+ /// Update one of the service protection limits for a namespace
+ ///
+ /// `limit_update` is the new service limit protection limit to set
+ /// on the namespace.
+ ///
+ /// Zero-valued limits are rejected, returning an error.
+ pub async fn update_namespace_service_protection_limit(
+ &mut self,
+ namespace: &str,
+ limit_update: LimitUpdate,
+ ) -> Result<Namespace, Error> {
+ let response = self
+ .inner
+ .update_namespace_service_protection_limit(
+ UpdateNamespaceServiceProtectionLimitRequest {
+ name: namespace.to_string(),
+ limit_update: Some(limit_update),
+ },
+ )
+ .await?;
+
+ Ok(response.into_inner().namespace.unwrap_field("namespace")?)
+ }
+
/// Delete a namespace
pub async fn delete_namespace(&mut self, namespace: &str) -> Result<(), Error> {
self.inner
|
6c17ee29a5f31570f416b2075a0630f7ef35cc29
|
Andrew Lamb
|
2022-11-10 06:59:54
|
make logging clearer when parquet files upload is retried (#6056)
|
* feat: log success when parquet files are retried
* fix: Update parquet_file/src/storage.rs
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
* fix: fmt
|
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: make logging clearer when parquet files upload is retried (#6056)
* feat: log success when parquet files are retried
* fix: Update parquet_file/src/storage.rs
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
* fix: fmt
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
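The logging pattern this commit introduces, reduced to a generic sketch; this is not the actual `ParquetStorage` code, and it assumes the `tracing` and `tokio` crates.

```rust
// Generic sketch of the retry-logging change: each failed attempt logs a warning rather
// than an error, and a single info line is emitted if the upload only succeeded after
// retrying. Assumes `tracing` for log macros and `tokio` for the sleep.
use std::time::Duration;

use tracing::{info, warn};

async fn put_with_retry<F, Fut, E>(mut attempt: F)
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), E>>,
    E: std::fmt::Display,
{
    let mut retried = false;
    while let Err(e) = attempt().await {
        warn!(error = %e, "failed to upload parquet file to object storage, retrying");
        tokio::time::sleep(Duration::from_secs(1)).await;
        retried = true;
    }
    if retried {
        info!("Succeeded uploading files to object storage on retry");
    }
}
```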
diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs
index c4702fc246..963bab4618 100644
--- a/parquet_file/src/storage.rs
+++ b/parquet_file/src/storage.rs
@@ -242,9 +242,18 @@ impl ParquetStorage {
// This is abort-able by the user by dropping the upload() future.
//
// Cloning `data` is a ref count inc, rather than a data copy.
+ let mut retried = false;
while let Err(e) = self.object_store.put(&path, data.clone()).await {
- error!(error=%e, ?meta, "failed to upload parquet file to object storage");
+ warn!(error=%e, ?meta, "failed to upload parquet file to object storage, retrying");
tokio::time::sleep(Duration::from_secs(1)).await;
+ retried = true;
+ }
+
+ if retried {
+ info!(
+ ?meta,
+ "Succeeded uploading files to object storage on retry"
+ );
}
Ok((parquet_meta, file_size))
|
3e26567e058aa9febea3172e6b5256b537f14662
|
Marco Neumann
|
2023-06-15 10:01:55
|
cache slices instead of vecs (#7989)
|
Immutable `Box<Vec<T>>`/`Arc<Vec<T>>` are better stored as
`Box<[T]>`/`Arc<[T]>` because:
- allocation always exact (no need for `shrink_to_fit`)
- smaller (the fat pointer is just the memory address and the length, no
capacity required)
- less allocation (`Box`/`Arc` -> slice instead of `Box`/`Arc` -> `Vec`
-> buffer); in fact the vector itself was often missing in the
accounting code
Found while I was working on #7987.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor: cache slices instead of vecs (#7989)
Immutable `Box<Vec<T>>`/`Arc<Vec<T>>` are better stored as
`Box<[T]>`/`Arc<[T]>` because:
- allocation always exact (no need for `shrink_to_fit`)
- smaller (the fat pointer is just the memory address and the length, no
capacity required)
- less allocation (`Box`/`Arc` -> slice instead of `Box`/`Arc` -> `Vec`
-> buffer); in fact the vector itself was often missing in the
accounting code
Found while I was working on #7987.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
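A small self-contained sketch (plain std Rust, not code from the commit) of the point being made: collecting into `Arc<[T]>` gives an exactly-sized, single-hop allocation whose accounting can use `len()`, whereas `Arc<Vec<T>>` adds a Vec header and possibly unused capacity.

```rust
use std::sync::Arc;

fn main() {
    let ids: Vec<u64> = (0..3).collect();

    // Arc<Vec<u64>>: Arc -> Vec header (ptr, len, capacity) -> heap buffer.
    let nested: Arc<Vec<u64>> = Arc::new(ids.clone());

    // Arc<[u64]>: a fat pointer (address + length) straight to a shared buffer that is
    // allocated exactly to size, so no shrink_to_fit is needed.
    let slice: Arc<[u64]> = ids.into();

    // Memory accounting can rely on len(); with a Vec you would need capacity().
    assert_eq!(slice.len() * std::mem::size_of::<u64>(), 24);
    println!("nested len = {}, slice len = {}", nested.len(), slice.len());
}
```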
diff --git a/querier/src/cache/namespace.rs b/querier/src/cache/namespace.rs
index 521aa9419e..9c57ccb167 100644
--- a/querier/src/cache/namespace.rs
+++ b/querier/src/cache/namespace.rs
@@ -215,7 +215,7 @@ pub struct CachedTable {
pub schema: Schema,
pub column_id_map: HashMap<ColumnId, Arc<str>>,
pub column_id_map_rev: HashMap<Arc<str>, ColumnId>,
- pub primary_key_column_ids: Vec<ColumnId>,
+ pub primary_key_column_ids: Box<[ColumnId]>,
}
impl CachedTable {
@@ -234,7 +234,7 @@ impl CachedTable {
.keys()
.map(|name| name.len())
.sum::<usize>()
- + (self.primary_key_column_ids.capacity() * size_of::<ColumnId>())
+ + (self.primary_key_column_ids.len() * size_of::<ColumnId>())
}
}
@@ -259,7 +259,7 @@ impl From<TableSchema> for CachedTable {
.collect();
column_id_map_rev.shrink_to_fit();
- let mut primary_key_column_ids: Vec<ColumnId> = schema
+ let primary_key_column_ids = schema
.primary_key()
.into_iter()
.map(|name| {
@@ -268,7 +268,6 @@ impl From<TableSchema> for CachedTable {
.unwrap_or_else(|| panic!("primary key not known?!: {name}"))
})
.collect();
- primary_key_column_ids.shrink_to_fit();
Self {
id,
@@ -394,7 +393,7 @@ mod tests {
(Arc::from(col112.column.name.clone()), col112.column.id),
(Arc::from(col113.column.name.clone()), col113.column.id),
]),
- primary_key_column_ids: vec![col112.column.id, col113.column.id],
+ primary_key_column_ids: [col112.column.id, col113.column.id].into(),
}),
),
(
@@ -415,7 +414,7 @@ mod tests {
(Arc::from(col121.column.name.clone()), col121.column.id),
(Arc::from(col122.column.name.clone()), col122.column.id),
]),
- primary_key_column_ids: vec![col122.column.id],
+ primary_key_column_ids: [col122.column.id].into(),
}),
),
]),
@@ -447,7 +446,7 @@ mod tests {
Arc::from(col211.column.name.clone()),
col211.column.id,
)]),
- primary_key_column_ids: vec![col211.column.id],
+ primary_key_column_ids: [col211.column.id].into(),
}),
)]),
};
diff --git a/querier/src/cache/parquet_file.rs b/querier/src/cache/parquet_file.rs
index 0a96882aae..633a837571 100644
--- a/querier/src/cache/parquet_file.rs
+++ b/querier/src/cache/parquet_file.rs
@@ -38,13 +38,13 @@ pub enum Error {
},
}
-type IngesterCounts = Option<Arc<Vec<(Uuid, u64)>>>;
+type IngesterCounts = Option<Arc<[(Uuid, u64)]>>;
/// Holds catalog information about a parquet file
#[derive(Debug)]
pub struct CachedParquetFiles {
/// Parquet catalog information
- pub files: Arc<Vec<Arc<ParquetFile>>>,
+ pub files: Arc<[Arc<ParquetFile>]>,
/// Number of persisted Parquet files per table ID per ingester UUID that ingesters have told
/// us about. When a call to `get` includes a number of persisted Parquet files for this table
@@ -60,10 +60,10 @@ impl CachedParquetFiles {
parquet_files: Vec<ParquetFile>,
persisted_file_counts_from_ingesters: IngesterCounts,
) -> Self {
- let files: Vec<_> = parquet_files.into_iter().map(Arc::new).collect();
+ let files = parquet_files.into_iter().map(Arc::new).collect();
Self {
- files: Arc::new(files),
+ files,
persisted_file_counts_from_ingesters,
}
}
@@ -71,13 +71,13 @@ impl CachedParquetFiles {
/// return the underlying files as a new Vec
#[cfg(test)]
fn vec(&self) -> Vec<Arc<ParquetFile>> {
- self.files.as_ref().clone()
+ self.files.as_ref().to_vec()
}
/// Estimate the memory consumption of this object and its contents
fn size(&self) -> usize {
// simplify accounting by ensuring len and capacity of vector are the same
- assert_eq!(self.files.len(), self.files.capacity());
+ assert_eq!(self.files.len(), self.files.len());
// Note size_of_val is the size of the Arc
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=ae8fee8b4f7f5f013dc01ea1fda165da
@@ -93,7 +93,7 @@ impl CachedParquetFiles {
.as_ref()
.map(|map| {
std::mem::size_of_val(map.as_ref()) +
- map.capacity() * mem::size_of::<(Uuid, u64)>()
+ map.len() * mem::size_of::<(Uuid, u64)>()
}).unwrap_or_default()
}
}
@@ -227,8 +227,7 @@ impl ParquetFileCache {
persisted_file_counts_by_ingester_uuid.map(|map| {
let mut entries = map.into_iter().collect::<Vec<_>>();
entries.sort();
- entries.shrink_to_fit();
- Arc::new(entries)
+ entries.into()
});
let persisted_file_counts_by_ingester_uuid_captured =
persisted_file_counts_by_ingester_uuid.clone();
@@ -246,7 +245,7 @@ impl ParquetFileCache {
cached_file
.persisted_file_counts_from_ingesters
.as_ref()
- .map(|x| x.as_ref().as_ref()),
+ .map(|x| x.as_ref()),
ingester_counts,
)
} else {
@@ -361,7 +360,7 @@ mod tests {
let table_id = table.table.id;
let single_file_size = 200;
- let two_file_size = 360;
+ let two_file_size = 368;
assert!(single_file_size < two_file_size);
let cache = make_cache(&catalog);
diff --git a/querier/src/cache/partition.rs b/querier/src/cache/partition.rs
index a3de22d0ff..1ef5de87cf 100644
--- a/querier/src/cache/partition.rs
+++ b/querier/src/cache/partition.rs
@@ -166,14 +166,14 @@ impl CachedPartition {
pub struct PartitionSortKey {
pub sort_key: Arc<SortKey>,
pub column_set: HashSet<ColumnId>,
- pub column_order: Vec<ColumnId>,
+ pub column_order: Box<[ColumnId]>,
}
impl PartitionSortKey {
fn new(sort_key: SortKey, column_id_map_rev: &HashMap<Arc<str>, ColumnId>) -> Self {
let sort_key = Arc::new(sort_key);
- let mut column_order: Vec<ColumnId> = sort_key
+ let column_order: Box<[ColumnId]> = sort_key
.iter()
.map(|(name, _opts)| {
*column_id_map_rev
@@ -181,7 +181,6 @@ impl PartitionSortKey {
.unwrap_or_else(|| panic!("column_id_map_rev misses data: {name}"))
})
.collect();
- column_order.shrink_to_fit();
let mut column_set: HashSet<ColumnId> = column_order.iter().copied().collect();
column_set.shrink_to_fit();
@@ -198,7 +197,7 @@ impl PartitionSortKey {
size_of_val(self)
+ self.sort_key.as_ref().size()
+ (self.column_set.capacity() * size_of::<ColumnId>())
- + (self.column_order.capacity() * size_of::<ColumnId>())
+ + (self.column_order.len() * size_of::<ColumnId>())
}
}
@@ -239,7 +238,7 @@ mod tests {
(Arc::from(c1.column.name.clone()), c1.column.id),
(Arc::from(c2.column.name.clone()), c2.column.id),
]),
- primary_key_column_ids: vec![c1.column.id, c2.column.id],
+ primary_key_column_ids: [c1.column.id, c2.column.id].into(),
});
let cache = PartitionCache::new(
@@ -259,7 +258,7 @@ mod tests {
&PartitionSortKey {
sort_key: Arc::new(p1.sort_key().unwrap()),
column_set: HashSet::from([c1.column.id, c2.column.id]),
- column_order: vec![c1.column.id, c2.column.id],
+ column_order: [c1.column.id, c2.column.id].into(),
}
);
assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 1);
@@ -320,7 +319,7 @@ mod tests {
(Arc::from(c1.column.name.clone()), c1.column.id),
(Arc::from(c2.column.name.clone()), c2.column.id),
]),
- primary_key_column_ids: vec![c1.column.id, c2.column.id],
+ primary_key_column_ids: [c1.column.id, c2.column.id].into(),
});
let cache = PartitionCache::new(
@@ -370,7 +369,7 @@ mod tests {
(Arc::from(c1.column.name.clone()), c1.column.id),
(Arc::from(c2.column.name.clone()), c2.column.id),
]),
- primary_key_column_ids: vec![c1.column.id, c2.column.id],
+ primary_key_column_ids: [c1.column.id, c2.column.id].into(),
});
let cache = PartitionCache::new(
@@ -421,7 +420,7 @@ mod tests {
&PartitionSortKey {
sort_key: Arc::new(p_sort_key.clone().unwrap()),
column_set: HashSet::from([c1.column.id, c2.column.id]),
- column_order: vec![c1.column.id, c2.column.id],
+ column_order: [c1.column.id, c2.column.id].into(),
}
);
assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 4);
diff --git a/querier/src/cache/projected_schema.rs b/querier/src/cache/projected_schema.rs
index 4213324293..57d42e0f19 100644
--- a/querier/src/cache/projected_schema.rs
+++ b/querier/src/cache/projected_schema.rs
@@ -29,7 +29,7 @@ const CACHE_ID: &str = "projected_schema";
#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
struct CacheKey {
table_id: TableId,
- projection: Vec<ColumnId>,
+ projection: Box<[ColumnId]>,
}
impl CacheKey {
@@ -40,18 +40,15 @@ impl CacheKey {
// normalize column order
projection.sort();
- // ensure that cache key is as small as possible
- projection.shrink_to_fit();
-
Self {
table_id,
- projection,
+ projection: projection.into(),
}
}
/// Size in of key including `Self`.
fn size(&self) -> usize {
- size_of_val(self) + self.projection.capacity() * size_of::<ColumnId>()
+ size_of_val(self) + self.projection.len() * size_of::<ColumnId>()
}
}
@@ -207,37 +204,40 @@ mod tests {
schema: table_schema_a.clone(),
column_id_map: column_id_map_a.clone(),
column_id_map_rev: reverse_map(&column_id_map_a),
- primary_key_column_ids: vec![
+ primary_key_column_ids: [
ColumnId::new(1),
ColumnId::new(2),
ColumnId::new(3),
ColumnId::new(4),
- ],
+ ]
+ .into(),
});
let table_1b = Arc::new(CachedTable {
id: table_id_1,
schema: table_schema_b.clone(),
column_id_map: column_id_map_b.clone(),
column_id_map_rev: reverse_map(&column_id_map_b),
- primary_key_column_ids: vec![
+ primary_key_column_ids: [
ColumnId::new(1),
ColumnId::new(2),
ColumnId::new(3),
ColumnId::new(4),
- ],
+ ]
+ .into(),
});
let table_2a = Arc::new(CachedTable {
id: table_id_2,
schema: table_schema_a.clone(),
column_id_map: column_id_map_a.clone(),
column_id_map_rev: reverse_map(&column_id_map_a),
- primary_key_column_ids: vec![
+ primary_key_column_ids: [
ColumnId::new(1),
ColumnId::new(2),
ColumnId::new(3),
ColumnId::new(4),
ColumnId::new(5),
- ],
+ ]
+ .into(),
});
// initial request
diff --git a/querier/src/parquet/creation.rs b/querier/src/parquet/creation.rs
index 9112176f38..65a65b574c 100644
--- a/querier/src/parquet/creation.rs
+++ b/querier/src/parquet/creation.rs
@@ -62,7 +62,7 @@ impl ChunkAdapter {
pub(crate) async fn new_chunks(
&self,
cached_table: Arc<CachedTable>,
- files: Arc<Vec<Arc<ParquetFile>>>,
+ files: Arc<[Arc<ParquetFile>]>,
predicate: &Predicate,
early_pruning_observer: MetricPruningObserver,
span: Option<Span>,
diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs
index 8900f3da10..6663f3dd41 100644
--- a/querier/src/parquet/mod.rs
+++ b/querier/src/parquet/mod.rs
@@ -240,7 +240,7 @@ pub mod tests {
self.adapter
.new_chunks(
Arc::clone(cached_table),
- Arc::new(vec![Arc::clone(&self.parquet_file)]),
+ vec![Arc::clone(&self.parquet_file)].into(),
&Predicate::new(),
MetricPruningObserver::new_unregistered(),
None,
|
bfa476c08fedbd604dc843bd25c86e57b0406422
|
Dom Dwyer
|
2023-02-21 14:30:33
|
PartitionStream construction helper
|
Mocking out query responses requires constructing a PartitionResponse
containing the set of PartitionStream, itself a stream of RecordBatch.
This nested stream of structures is required to enable a pull-based /
streaming query response, but makes testing difficult because the types
are hard to initialise.
This commit adds a helper macro, make_partition_stream!, which, when
combined with make_batch! to initialise the inner RecordBatch instances,
reduces the developer burden when writing test code that interacts with
query responses:
let stream = make_partition_stream!(
PartitionId::new(1) => [
make_batch!(
Int64Array("a" => vec![1, 2, 3, 4, 5]),
Float32Array("b" => vec![4.1, 4.2, 4.3, 4.4, 5.0]),
),
make_batch!(
Int64Array("c" => vec![1, 2, 3, 4, 5]),
),
],
PartitionId::new(2) => [
make_batch!(
Float32Array("d" => vec![1.1, 2.2, 3.3, 4.4, 5.5]),
),
],
);
The above yields a PartitionStream containing two partitions, with their
respective RecordBatch instances.
| null |
test: PartitionStream construction helper
Mocking out query responses requires constructing a PartitionResponse
containing the set of PartitionStream, itself a stream of RecordBatch.
This nested stream of structures is required to enable a pull-based /
streaming query response, but makes testing difficult because the types
are hard to initialise.
This commit adds a helper macro, make_partition_stream!, which, when
combined with make_batch! to initialise the inner RecordBatch instances,
reduces the developer burden when writing test code that interacts with
query responses:
let stream = make_partition_stream!(
PartitionId::new(1) => [
make_batch!(
Int64Array("a" => vec![1, 2, 3, 4, 5]),
Float32Array("b" => vec![4.1, 4.2, 4.3, 4.4, 5.0]),
),
make_batch!(
Int64Array("c" => vec![1, 2, 3, 4, 5]),
),
],
PartitionId::new(2) => [
make_batch!(
Float32Array("d" => vec![1.1, 2.2, 3.3, 4.4, 5.5]),
),
],
);
The above yields a PartitionStream containing two partitions, with their
respective RecordBatch instances.
|
diff --git a/ingester2/src/test_util.rs b/ingester2/src/test_util.rs
index 2e932fd571..c2a15253e5 100644
--- a/ingester2/src/test_util.rs
+++ b/ingester2/src/test_util.rs
@@ -52,6 +52,65 @@ macro_rules! make_batch {(
}}
}
+/// Construct a [`PartitionStream`] from the given partitions & batches.
+///
+/// This example constructs a [`PartitionStream`] yielding two partitions
+/// (with IDs 1 & 2), the former containing two [`RecordBatch`] and the
+/// latter containing one.
+///
+/// See [`make_batch`] for a handy way to construct the [`RecordBatch`].
+///
+/// ```
+/// let stream = make_partition_stream!(
+/// PartitionId::new(1) => [
+/// make_batch!(
+/// Int64Array("a" => vec![1, 2, 3, 4, 5]),
+/// Float32Array("b" => vec![4.1, 4.2, 4.3, 4.4, 5.0]),
+/// ),
+/// make_batch!(
+/// Int64Array("c" => vec![1, 2, 3, 4, 5]),
+/// ),
+/// ],
+/// PartitionId::new(2) => [
+/// make_batch!(
+/// Float32Array("d" => vec![1.1, 2.2, 3.3, 4.4, 5.5]),
+/// ),
+/// ],
+/// );
+/// ```
+#[macro_export]
+macro_rules! make_partition_stream {
+ (
+ $(
+ $id:expr => [$($batch:expr,)+],
+ )+
+ ) => {{
+ use arrow::datatypes::Schema;
+ use datafusion::physical_plan::memory::MemoryStream;
+ use $crate::query::{response::PartitionStream, partition_response::PartitionResponse};
+ use futures::stream;
+
+ PartitionStream::new(stream::iter([
+ $({
+ let mut batches = vec![];
+ let mut schema = Schema::empty();
+ $(
+ let (batch, this_schema) = $batch;
+ batches.push(batch);
+ schema = Schema::try_merge([schema, (*this_schema).clone()]).expect("incompatible batch schemas");
+ )+
+
+ let batch = MemoryStream::try_new(batches, Arc::new(schema), None).unwrap();
+ PartitionResponse::new(
+ Some(Box::pin(batch)),
+ $id,
+ 42,
+ )
+ },)+
+ ]))
+ }};
+ }
+
/// Construct a [`DmlWrite`] with the specified parameters, for LP that contains
/// a single table identified by `table_id`.
///
|
6e13ff8cb8ae5644d65add024b10785ec2dfa1b4
|
Andrew Lamb
|
2023-08-02 09:58:16
|
Update DataFusion pin (#8390)
|
* chore: Update DataFusion pin
* chore: Update for API
* fix: update plans
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update DataFusion pin (#8390)
* chore: Update DataFusion pin
* chore: Update for API
* fix: update plans
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index d3397de382..1bcac8c3d8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -699,7 +699,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05"
dependencies = [
"memchr",
- "regex-automata 0.3.4",
+ "regex-automata 0.3.3",
"serde",
]
@@ -778,12 +778,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
-version = "1.0.80"
+version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51f1226cd9da55587234753d1245dd5b132343ea240f26b6a9003d68706141ba"
+checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
dependencies = [
"jobserver",
- "libc",
]
[[package]]
@@ -1376,7 +1375,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"ahash",
"arrow",
@@ -1424,7 +1423,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"arrow",
"arrow-array",
@@ -1438,7 +1437,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"dashmap",
"datafusion-common",
@@ -1455,7 +1454,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"ahash",
"arrow",
@@ -1469,7 +1468,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"arrow",
"async-trait",
@@ -1486,7 +1485,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"ahash",
"arrow",
@@ -1520,7 +1519,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"arrow",
"chrono",
@@ -1534,7 +1533,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "28.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=2cf5f5b5bb824598de185d64c541c52c930728cf#2cf5f5b5bb824598de185d64c541c52c930728cf"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=5faa10b2911ecca4c2199f78ae675363c7d8230e#5faa10b2911ecca4c2199f78ae675363c7d8230e"
dependencies = [
"arrow",
"arrow-schema",
@@ -4569,7 +4568,7 @@ checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.3.4",
+ "regex-automata 0.3.3",
"regex-syntax 0.7.4",
]
@@ -4584,9 +4583,9 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.3.4"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294"
+checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310"
dependencies = [
"aho-corasick",
"memchr",
@@ -6866,7 +6865,7 @@ dependencies = [
"rand",
"rand_core",
"regex",
- "regex-automata 0.3.4",
+ "regex-automata 0.3.3",
"regex-syntax 0.7.4",
"reqwest",
"ring",
diff --git a/Cargo.toml b/Cargo.toml
index 93bd0fc959..5df1c74d88 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,8 +121,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "43.0.0" }
arrow-flight = { version = "43.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "2cf5f5b5bb824598de185d64c541c52c930728cf", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev = "2cf5f5b5bb824598de185d64c541c52c930728cf" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "5faa10b2911ecca4c2199f78ae675363c7d8230e", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev = "5faa10b2911ecca4c2199f78ae675363c7d8230e" }
hashbrown = { version = "0.14.0" }
object_store = { version = "0.6.0" }
diff --git a/influxdb_iox/tests/query_tests/cases/in/aggregates.sql.expected b/influxdb_iox/tests/query_tests/cases/in/aggregates.sql.expected
index 746e4c6a74..5ab8fdd0d2 100644
--- a/influxdb_iox/tests/query_tests/cases/in/aggregates.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/aggregates.sql.expected
@@ -1,11 +1,11 @@
-- Test Setup: OneMeasurementWithTags
-- SQL: SELECT count(time), count(*), count(bar), min(bar), max(bar), min(time), max(time) FROM cpu;
-- Results After Sorting
-+-----------------+-----------------+----------------+--------------+--------------+--------------------------------+--------------------------------+
-| COUNT(cpu.time) | COUNT(UInt8(1)) | COUNT(cpu.bar) | MIN(cpu.bar) | MAX(cpu.bar) | MIN(cpu.time) | MAX(cpu.time) |
-+-----------------+-----------------+----------------+--------------+--------------+--------------------------------+--------------------------------+
-| 4 | 4 | 4 | 1.0 | 2.0 | 1970-01-01T00:00:00.000000010Z | 1970-01-01T00:00:00.000000040Z |
-+-----------------+-----------------+----------------+--------------+--------------+--------------------------------+--------------------------------+
++-----------------+----------+----------------+--------------+--------------+--------------------------------+--------------------------------+
+| COUNT(cpu.time) | COUNT(*) | COUNT(cpu.bar) | MIN(cpu.bar) | MAX(cpu.bar) | MIN(cpu.time) | MAX(cpu.time) |
++-----------------+----------+----------------+--------------+--------------+--------------------------------+--------------------------------+
+| 4 | 4 | 4 | 1.0 | 2.0 | 1970-01-01T00:00:00.000000010Z | 1970-01-01T00:00:00.000000040Z |
++-----------------+----------+----------------+--------------+--------------+--------------------------------+--------------------------------+
-- SQL: SELECT max(foo) FROM cpu;
-- Results After Sorting
+--------------+
diff --git a/influxdb_iox/tests/query_tests/cases/in/aggregates_with_nulls.sql.expected b/influxdb_iox/tests/query_tests/cases/in/aggregates_with_nulls.sql.expected
index dd753a62ac..8ad9aaee52 100644
--- a/influxdb_iox/tests/query_tests/cases/in/aggregates_with_nulls.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/aggregates_with_nulls.sql.expected
@@ -8,10 +8,10 @@
+-------------------------+
-- SQL: SELECT count(*), city FROM o2 GROUP BY city;
-- Results After Sorting
-+-----------------+--------+
-| COUNT(UInt8(1)) | city |
-+-----------------+--------+
-| 1 | Boston |
-| 2 | NYC |
-| 2 | |
-+-----------------+--------+
\ No newline at end of file
++----------+--------+
+| COUNT(*) | city |
++----------+--------+
+| 1 | Boston |
+| 2 | NYC |
+| 2 | |
++----------+--------+
\ No newline at end of file
diff --git a/influxdb_iox/tests/query_tests/cases/in/basic.sql.expected b/influxdb_iox/tests/query_tests/cases/in/basic.sql.expected
index 2d8917f2cb..d52920d496 100644
--- a/influxdb_iox/tests/query_tests/cases/in/basic.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/basic.sql.expected
@@ -46,11 +46,11 @@
| 21.0 | west |
+------+--------+
-- SQL: SELECT count(*) from cpu group by region;
-+-----------------+
-| COUNT(UInt8(1)) |
-+-----------------+
-| 2 |
-+-----------------+
++----------+
+| COUNT(*) |
++----------+
+| 2 |
++----------+
-- SQL: SELECT * from disk;
+-------+--------+--------------------------------+
| bytes | region | time |
diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_ingester.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_ingester.sql.expected
index dc9fd64026..30a89657d0 100644
--- a/influxdb_iox/tests/query_tests/cases/in/duplicates_ingester.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_ingester.sql.expected
@@ -102,8 +102,8 @@
| | |
----------
-- SQL: select count(*) from h2o;
-+-----------------+
-| COUNT(UInt8(1)) |
-+-----------------+
-| 18 |
-+-----------------+
\ No newline at end of file
++----------+
+| COUNT(*) |
++----------+
+| 18 |
++----------+
\ No newline at end of file
diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet.sql.expected
index b4d20b99fe..53cc5c3796 100644
--- a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet.sql.expected
@@ -85,11 +85,11 @@
| | |
----------
-- SQL: select count(*) from h2o;
-+-----------------+
-| COUNT(UInt8(1)) |
-+-----------------+
-| 18 |
-+-----------------+
++----------+
+| COUNT(*) |
++----------+
+| 18 |
++----------+
-- SQL: EXPLAIN ANALYZE SELECT * from h2o where state = 'MA'
-- Results After Normalizing UUIDs
-- Results After Normalizing Metrics
diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected
index 2e648981d8..3312e31515 100644
--- a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected
@@ -1,20 +1,20 @@
-- Test Setup: TwentySortedParquetFiles
-- SQL: select count(*), sum(f) from m;
-+-----------------+----------+
-| COUNT(UInt8(1)) | SUM(m.f) |
-+-----------------+----------+
-| 21 | 33.0 |
-+-----------------+----------+
++----------+----------+
+| COUNT(*) | SUM(m.f) |
++----------+----------+
+| 21 | 33.0 |
++----------+----------+
-- SQL: EXPLAIN select count(*), sum(f) from m;
-- Results After Normalizing UUIDs
----------
| plan_type | plan |
----------
-| logical_plan | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)), SUM(m.f)]] |
+| logical_plan | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)) AS COUNT(*), SUM(m.f)]] |
| | TableScan: m projection=[f] |
-| physical_plan | AggregateExec: mode=Final, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
+| physical_plan | AggregateExec: mode=Final, gby=[], aggr=[COUNT(*), SUM(m.f)] |
| | CoalescePartitionsExec |
-| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
+| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(*), SUM(m.f)] |
| | UnionExec |
| | ParquetExec: file_groups={4 groups: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet, 1/1/1/00000000-0000-0000-0000-000000000001.parquet, 1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/00000000-0000-0000-0000-000000000003.parquet, 1/1/1/00000000-0000-0000-0000-000000000004.parquet, 1/1/1/00000000-0000-0000-0000-000000000005.parquet], [1/1/1/00000000-0000-0000-0000-000000000006.parquet, 1/1/1/00000000-0000-0000-0000-000000000007.parquet], [1/1/1/00000000-0000-0000-0000-000000000008.parquet, 1/1/1/00000000-0000-0000-0000-000000000009.parquet]]}, projection=[f] |
| | ProjectionExec: expr=[f@1 as f] |
diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected
index a6a4ef65da..c82ace4f82 100644
--- a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected
@@ -1,20 +1,20 @@
-- Test Setup: TwentySortedParquetFilesAndIngester
-- SQL: select count(*), sum(f) from m;
-+-----------------+----------+
-| COUNT(UInt8(1)) | SUM(m.f) |
-+-----------------+----------+
-| 21 | 33.0 |
-+-----------------+----------+
++----------+----------+
+| COUNT(*) | SUM(m.f) |
++----------+----------+
+| 21 | 33.0 |
++----------+----------+
-- SQL: EXPLAIN select count(*), sum(f) from m;
-- Results After Normalizing UUIDs
----------
| plan_type | plan |
----------
-| logical_plan | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)), SUM(m.f)]] |
+| logical_plan | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)) AS COUNT(*), SUM(m.f)]] |
| | TableScan: m projection=[f] |
-| physical_plan | AggregateExec: mode=Final, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
+| physical_plan | AggregateExec: mode=Final, gby=[], aggr=[COUNT(*), SUM(m.f)] |
| | CoalescePartitionsExec |
-| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
+| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(*), SUM(m.f)] |
| | UnionExec |
| | ParquetExec: file_groups={4 groups: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet, 1/1/1/00000000-0000-0000-0000-000000000001.parquet, 1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/00000000-0000-0000-0000-000000000003.parquet, 1/1/1/00000000-0000-0000-0000-000000000004.parquet, 1/1/1/00000000-0000-0000-0000-000000000005.parquet], [1/1/1/00000000-0000-0000-0000-000000000006.parquet, 1/1/1/00000000-0000-0000-0000-000000000007.parquet], [1/1/1/00000000-0000-0000-0000-000000000008.parquet, 1/1/1/00000000-0000-0000-0000-000000000009.parquet]]}, projection=[f] |
| | ProjectionExec: expr=[f@1 as f] |
diff --git a/influxdb_iox/tests/query_tests/cases/in/gapfill.sql.expected b/influxdb_iox/tests/query_tests/cases/in/gapfill.sql.expected
index f540b17fdf..046b6d6709 100644
--- a/influxdb_iox/tests/query_tests/cases/in/gapfill.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/gapfill.sql.expected
@@ -183,29 +183,29 @@ Error during planning: gap-filling query is missing lower time bound
| 2000-05-05T12:40:00Z | 60.0 |
+----------------------+---------------------+
-- SQL: SELECT date_bin_gapfill(interval '4 minutes', time) as four_minute, interpolate(min(cpu.idle)), interpolate(min(cpu."user")), count(*) from cpu where time between timestamp '2000-05-05T12:19:00Z' and timestamp '2000-05-05T12:40:00Z' group by four_minute;
-+----------------------+----------------------------+----------------------------+-----------------+
-| four_minute | interpolate(MIN(cpu.idle)) | interpolate(MIN(cpu.user)) | COUNT(UInt8(1)) |
-+----------------------+----------------------------+----------------------------+-----------------+
-| 2000-05-05T12:16:00Z | | | |
-| 2000-05-05T12:20:00Z | 70.0 | 23.2 | 1 |
-| 2000-05-05T12:24:00Z | 67.5 | 24.2 | |
-| 2000-05-05T12:28:00Z | 65.0 | 25.2 | 1 |
-| 2000-05-05T12:32:00Z | 62.5 | 27.05 | |
-| 2000-05-05T12:36:00Z | 60.0 | 28.9 | 1 |
-| 2000-05-05T12:40:00Z | | 21.0 | 1 |
-+----------------------+----------------------------+----------------------------+-----------------+
++----------------------+----------------------------+----------------------------+----------+
+| four_minute | interpolate(MIN(cpu.idle)) | interpolate(MIN(cpu.user)) | COUNT(*) |
++----------------------+----------------------------+----------------------------+----------+
+| 2000-05-05T12:16:00Z | | | |
+| 2000-05-05T12:20:00Z | 70.0 | 23.2 | 1 |
+| 2000-05-05T12:24:00Z | 67.5 | 24.2 | |
+| 2000-05-05T12:28:00Z | 65.0 | 25.2 | 1 |
+| 2000-05-05T12:32:00Z | 62.5 | 27.05 | |
+| 2000-05-05T12:36:00Z | 60.0 | 28.9 | 1 |
+| 2000-05-05T12:40:00Z | | 21.0 | 1 |
++----------------------+----------------------------+----------------------------+----------+
-- SQL: SELECT date_bin_gapfill(interval '4 minutes 1 nanosecond', time, timestamp '2000-05-05T12:15:59.999999999') as four_minute, interpolate(min(cpu.idle)), interpolate(min(cpu."user")), count(*) from cpu where time between timestamp '2000-05-05T12:19:00Z' and timestamp '2000-05-05T12:44:00Z' group by four_minute;
-+--------------------------------+----------------------------+----------------------------+-----------------+
-| four_minute | interpolate(MIN(cpu.idle)) | interpolate(MIN(cpu.user)) | COUNT(UInt8(1)) |
-+--------------------------------+----------------------------+----------------------------+-----------------+
-| 2000-05-05T12:15:59.999999999Z | | | |
-| 2000-05-05T12:20:00Z | 70.0 | 23.2 | 1 |
-| 2000-05-05T12:24:00.000000001Z | 67.5 | 24.2 | |
-| 2000-05-05T12:28:00.000000002Z | 65.0 | 25.2 | 1 |
-| 2000-05-05T12:32:00.000000003Z | 62.5 | 23.1 | |
-| 2000-05-05T12:36:00.000000004Z | 60.0 | 21.0 | 2 |
-| 2000-05-05T12:40:00.000000005Z | | | |
-+--------------------------------+----------------------------+----------------------------+-----------------+
++--------------------------------+----------------------------+----------------------------+----------+
+| four_minute | interpolate(MIN(cpu.idle)) | interpolate(MIN(cpu.user)) | COUNT(*) |
++--------------------------------+----------------------------+----------------------------+----------+
+| 2000-05-05T12:15:59.999999999Z | | | |
+| 2000-05-05T12:20:00Z | 70.0 | 23.2 | 1 |
+| 2000-05-05T12:24:00.000000001Z | 67.5 | 24.2 | |
+| 2000-05-05T12:28:00.000000002Z | 65.0 | 25.2 | 1 |
+| 2000-05-05T12:32:00.000000003Z | 62.5 | 23.1 | |
+| 2000-05-05T12:36:00.000000004Z | 60.0 | 21.0 | 2 |
+| 2000-05-05T12:40:00.000000005Z | | | |
++--------------------------------+----------------------------+----------------------------+----------+
-- SQL: SELECT region, date_bin_gapfill('10 minute', time) as minute, locf(avg(cpu.user)) as locf_avg_user from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by region, minute;
+--------+----------------------+--------------------+
| region | minute | locf_avg_user |
diff --git a/predicate/src/rpc_predicate/rewrite.rs b/predicate/src/rpc_predicate/rewrite.rs
index 463732b7b5..08fd660e4c 100644
--- a/predicate/src/rpc_predicate/rewrite.rs
+++ b/predicate/src/rpc_predicate/rewrite.rs
@@ -159,6 +159,9 @@ fn is_comparison(op: Operator) -> bool {
Operator::RegexNotMatch => true,
Operator::RegexNotIMatch => true,
Operator::StringConcat => false,
+ // array containment operators
+ Operator::ArrowAt => true,
+ Operator::AtArrow => true,
}
}
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index cf612bea13..c1681a05f8 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -28,9 +28,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "2cf5f5b5bb824598de185d64c541c52c930728cf" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "2cf5f5b5bb824598de185d64c541c52c930728cf", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "2cf5f5b5bb824598de185d64c541c52c930728cf", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "5faa10b2911ecca4c2199f78ae675363c7d8230e" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "5faa10b2911ecca4c2199f78ae675363c7d8230e", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "5faa10b2911ecca4c2199f78ae675363c7d8230e", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1", features = ["serde"] }
fixedbitset = { version = "0.4" }
|
db5ad12b9a3741c8da4faa3743968c915994217c
|
Dom Dwyer
|
2023-09-13 14:05:59
|
remove misleading documentation
|
In an ArcMap, an init() function is called exactly once. This sentence
was supposed to suggest that threads race to call init, but instead it
sounds like they race to initialise a V (via init()) and put it in the
map before the other thread, which is incorrect.
| null |
docs: remove misleading documentation
In an ArcMap, an init() function is called exactly once. This sentence
was supposed to suggest that threads race to call init, but instead it
sounds like they race to initialise a V (via init()) and put it in the
map before the other thread, which is incorrect.
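For illustration, here is a minimal sketch of the exactly-once-init contract the corrected docs describe. It is not the ingester's ArcMap (which is biased towards many parallel readers) but a hypothetical Mutex-backed stand-in with the same observable semantics: init runs at most once per key, and every racing caller gets a handle to the same Arc<V>:
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
// Hypothetical simplification of the exactly-once-init behaviour described
// above; the real ArcMap lives in ingester/src/arcmap.rs.
struct SimpleArcMap<K, V> {
    inner: Mutex<HashMap<K, Arc<V>>>,
}
impl<K: std::hash::Hash + Eq, V> SimpleArcMap<K, V> {
    fn new() -> Self {
        Self { inner: Mutex::new(HashMap::new()) }
    }
    // `init` runs at most once per key: callers that race on the same key
    // all receive a handle to the single value produced by whichever caller
    // got there first. They do NOT each build a V and race to store it.
    fn get_or_insert_with(&self, key: K, init: impl FnOnce() -> V) -> Arc<V> {
        let mut map = self.inner.lock().unwrap();
        Arc::clone(map.entry(key).or_insert_with(|| Arc::new(init())))
    }
}
fn main() {
    let m = SimpleArcMap::new();
    let a = m.get_or_insert_with("p1", || 42u64);
    let b = m.get_or_insert_with("p1", || unreachable!("init runs only once"));
    assert!(Arc::ptr_eq(&a, &b));
}
Under this contract no thread eagerly builds its own V and races to store it, which is exactly the misleading reading the doc change below removes.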
|
diff --git a/ingester/src/arcmap.rs b/ingester/src/arcmap.rs
index 509c314179..fed9cbb7a6 100644
--- a/ingester/src/arcmap.rs
+++ b/ingester/src/arcmap.rs
@@ -62,10 +62,9 @@ where
/// This call is thread-safe - if two calls race, a value will be
/// initialised exactly once (one arbitrary caller's `init` closure will be
/// executed) and both callers will obtain a handle to the same instance of
- /// `V`. Both threads will eagerly initialise V and race to "win" storing V
- /// in the map.
+ /// `V`.
///
- /// # Performance
+ /// # Performance
///
/// This method is biased towards read-heavy workloads, with many readers
/// progressing in parallel. If the value for `key` must be initialised, all
|
eb5a661ab3a8786b912eddad52506e6397c0286b
|
Marco Neumann
|
2022-10-19 11:54:42
|
prep work for #5897 (#5907)
|
* refactor: add ID to `ParquetStorage`
* refactor: remove duplicate code
* refactor: use dedicated `StorageId`
| null |
refactor: prep work for #5897 (#5907)
* refactor: add ID to `ParquetStorage`
* refactor: remove duplicate code
* refactor: use dedicated `StorageId`
|
diff --git a/Cargo.lock b/Cargo.lock
index 983b1fa688..23d1518988 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2524,6 +2524,7 @@ dependencies = [
"observability_deps",
"once_cell",
"parquet_file",
+ "predicate",
"schema",
"sharder",
"uuid",
diff --git a/compactor/src/cold.rs b/compactor/src/cold.rs
index 57323df6a8..83f0dd74f0 100644
--- a/compactor/src/cold.rs
+++ b/compactor/src/cold.rs
@@ -110,6 +110,7 @@ mod tests {
use iox_query::exec::Executor;
use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestTable};
use iox_time::{SystemProvider, TimeProvider};
+ use parquet_file::storage::StorageId;
use std::collections::HashMap;
#[tokio::test]
@@ -178,7 +179,7 @@ mod tests {
let compactor = Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
@@ -417,7 +418,7 @@ mod tests {
let compactor = Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
@@ -642,7 +643,7 @@ mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
@@ -916,7 +917,7 @@ mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
@@ -1021,7 +1022,7 @@ mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
@@ -1161,7 +1162,7 @@ mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
@@ -1407,7 +1408,7 @@ mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
diff --git a/compactor/src/compact.rs b/compactor/src/compact.rs
index b389b05e8f..e5681d6cd3 100644
--- a/compactor/src/compact.rs
+++ b/compactor/src/compact.rs
@@ -558,6 +558,7 @@ pub mod tests {
};
use iox_tests::util::{TestCatalog, TestPartition};
use iox_time::SystemProvider;
+ use parquet_file::storage::StorageId;
use uuid::Uuid;
impl PartitionCompactionCandidateWithInfo {
@@ -814,7 +815,7 @@ pub mod tests {
let compactor = Compactor::new(
vec![shard.id, another_shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
time_provider,
BackoffConfig::default(),
diff --git a/compactor/src/hot.rs b/compactor/src/hot.rs
index d6528f575e..186bb68084 100644
--- a/compactor/src/hot.rs
+++ b/compactor/src/hot.rs
@@ -211,7 +211,7 @@ mod tests {
use data_types::CompactionLevel;
use iox_query::exec::Executor;
use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestShard, TestTable};
- use parquet_file::storage::ParquetStorage;
+ use parquet_file::storage::{ParquetStorage, StorageId};
use std::sync::Arc;
struct TestSetup {
@@ -499,7 +499,7 @@ mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard1.shard.id, shard2.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
time_provider,
BackoffConfig::default(),
diff --git a/compactor/src/lib.rs b/compactor/src/lib.rs
index 71e03be520..c57ba0edee 100644
--- a/compactor/src/lib.rs
+++ b/compactor/src/lib.rs
@@ -436,7 +436,7 @@ pub mod tests {
compact::Compactor, compact_one_partition, handler::CompactorConfig,
parquet_file_filtering, parquet_file_lookup, ParquetFilesForCompaction,
};
- use ::parquet_file::storage::ParquetStorage;
+ use ::parquet_file::storage::{ParquetStorage, StorageId};
use arrow_util::assert_batches_sorted_eq;
use backoff::BackoffConfig;
use data_types::{ColumnType, CompactionLevel, ParquetFileId};
@@ -599,7 +599,7 @@ pub mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
time_provider,
BackoffConfig::default(),
@@ -927,7 +927,7 @@ pub mod tests {
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::new(Executor::new(1)),
Arc::new(SystemProvider::new()),
BackoffConfig::default(),
diff --git a/compactor/src/parquet_file_combining.rs b/compactor/src/parquet_file_combining.rs
index 34e0dad011..df20acf158 100644
--- a/compactor/src/parquet_file_combining.rs
+++ b/compactor/src/parquet_file_combining.rs
@@ -659,12 +659,11 @@ mod tests {
use crate::parquet_file::CompactorParquetFile;
use super::*;
- use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_sorted_eq;
use data_types::{ColumnType, PartitionParam, ShardId};
use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestTable};
use metric::U64HistogramOptions;
- use parquet_file::ParquetFilePath;
+ use parquet_file::storage::StorageId;
use test_helpers::assert_error;
#[test]
@@ -852,7 +851,7 @@ mod tests {
files,
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -893,7 +892,7 @@ mod tests {
vec![parquet_file],
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -955,7 +954,7 @@ mod tests {
parquet_files.into_iter().take(4).collect(),
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -1003,7 +1002,7 @@ mod tests {
// Compacted file
let file1 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file1).await;
+ let batches = table.read_parquet_file(file1).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1040,7 +1039,7 @@ mod tests {
parquet_files.into_iter().take(5).collect(),
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -1087,7 +1086,7 @@ mod tests {
// Compacted file with the later data
let file1 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file1).await;
+ let batches = table.read_parquet_file(file1).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1102,7 +1101,7 @@ mod tests {
// Compacted file with the earlier data
let file0 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file0).await;
+ let batches = table.read_parquet_file(file0).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1143,7 +1142,7 @@ mod tests {
files_to_compact,
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -1190,7 +1189,7 @@ mod tests {
// Compacted file with all the data
let file1 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file1).await;
+ let batches = table.read_parquet_file(file1).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1227,7 +1226,7 @@ mod tests {
parquet_files,
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -1272,7 +1271,7 @@ mod tests {
// Compacted file with the latest data
let file2 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file2).await;
+ let batches = table.read_parquet_file(file2).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1286,7 +1285,7 @@ mod tests {
// Compacted file with the later data
let file1 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file1).await;
+ let batches = table.read_parquet_file(file1).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1300,7 +1299,7 @@ mod tests {
// Compacted file with the earlier data
let file0 = files.pop().unwrap();
- let batches = read_parquet_file(&table, file0).await;
+ let batches = table.read_parquet_file(file0).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1343,7 +1342,7 @@ mod tests {
level_1_files,
candidate_partition,
Arc::clone(&catalog.catalog),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
Arc::clone(&catalog.exec),
Arc::clone(&catalog.time_provider) as Arc<dyn TimeProvider>,
&compaction_input_file_bytes,
@@ -1384,7 +1383,7 @@ mod tests {
// ------------------------------------------------
// Verify the parquet file content
- let batches = read_parquet_file(&table, file).await;
+ let batches = table.read_parquet_file(file).await;
assert_batches_sorted_eq!(
&[
"+-----------+------+------+------+-----------------------------+",
@@ -1400,29 +1399,6 @@ mod tests {
);
}
- async fn read_parquet_file(table: &Arc<TestTable>, file: ParquetFile) -> Vec<RecordBatch> {
- let storage = ParquetStorage::new(table.catalog.object_store());
-
- // get schema
- let table_catalog_schema = table.catalog_schema().await;
- let column_id_lookup = table_catalog_schema.column_id_map();
- let table_schema = table.schema().await;
- let selection: Vec<_> = file
- .column_set
- .iter()
- .map(|id| *column_id_lookup.get(id).unwrap())
- .collect();
- let schema = table_schema.select_by_names(&selection).unwrap();
-
- let path: ParquetFilePath = (&file).into();
- let rx = storage
- .read_all(schema.as_arrow(), &path, file.file_size_bytes as usize)
- .unwrap();
- datafusion::physical_plan::common::collect(rx)
- .await
- .unwrap()
- }
-
#[derive(Debug, PartialEq)]
struct ExtractedByteMetrics {
sample_count: u64,
diff --git a/compactor/src/query.rs b/compactor/src/query.rs
index 20a8d068cc..f94937308e 100644
--- a/compactor/src/query.rs
+++ b/compactor/src/query.rs
@@ -283,7 +283,7 @@ mod tests {
use super::*;
use data_types::ColumnType;
use iox_tests::util::{TestCatalog, TestParquetFileBuilder};
- use parquet_file::storage::ParquetStorage;
+ use parquet_file::storage::{ParquetStorage, StorageId};
async fn test_setup(
compaction_level: CompactionLevel,
@@ -314,7 +314,7 @@ mod tests {
let parquet_chunk = Arc::new(ParquetChunk::new(
Arc::clone(&parquet_file),
Arc::new(table.schema().await),
- ParquetStorage::new(Arc::clone(&catalog.object_store)),
+ ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")),
));
QueryableParquetChunk::new(
diff --git a/ingester/src/data.rs b/ingester/src/data.rs
index c663bae41f..0f0270d910 100644
--- a/ingester/src/data.rs
+++ b/ingester/src/data.rs
@@ -15,7 +15,10 @@ use iox_time::{SystemProvider, TimeProvider};
use metric::{Attributes, Metric, U64Histogram, U64HistogramOptions};
use object_store::DynObjectStore;
use observability_deps::tracing::*;
-use parquet_file::{metadata::IoxMetadata, storage::ParquetStorage};
+use parquet_file::{
+ metadata::IoxMetadata,
+ storage::{ParquetStorage, StorageId},
+};
use snafu::{OptionExt, Snafu};
use write_summary::ShardProgress;
@@ -136,7 +139,7 @@ impl IngesterData {
.collect();
Self {
- store: ParquetStorage::new(object_store),
+ store: ParquetStorage::new(object_store, StorageId::from("iox")),
catalog,
shards,
exec,
diff --git a/iox_tests/Cargo.toml b/iox_tests/Cargo.toml
index 4a188e1a65..bc27bfa630 100644
--- a/iox_tests/Cargo.toml
+++ b/iox_tests/Cargo.toml
@@ -18,6 +18,7 @@ object_store = "0.5.1"
observability_deps = { path = "../observability_deps" }
once_cell = { version = "1.15.0", features = ["parking_lot"] }
parquet_file = { path = "../parquet_file" }
+predicate = { path = "../predicate" }
iox_query = { path = "../iox_query" }
schema = { path = "../schema" }
sharder = { path = "../sharder" }
diff --git a/iox_tests/src/util.rs b/iox_tests/src/util.rs
index cb3dae6614..160dcca498 100644
--- a/iox_tests/src/util.rs
+++ b/iox_tests/src/util.rs
@@ -20,7 +20,12 @@ use mutable_batch_lp::test_helpers::lp_to_mutable_batch;
use object_store::{memory::InMemory, DynObjectStore};
use observability_deps::tracing::debug;
use once_cell::sync::Lazy;
-use parquet_file::{metadata::IoxMetadata, storage::ParquetStorage, ParquetFilePath};
+use parquet_file::{
+ chunk::ParquetChunk,
+ metadata::IoxMetadata,
+ storage::{ParquetStorage, StorageId},
+};
+use predicate::Predicate;
use schema::{
selection::Selection,
sort::{adjust_sort_key_columns, compute_sort_key, SortKey},
@@ -347,7 +352,7 @@ impl TestTable {
/// Read the record batches from the specified Parquet File associated with this table.
pub async fn read_parquet_file(&self, file: ParquetFile) -> Vec<RecordBatch> {
- let storage = ParquetStorage::new(self.catalog.object_store());
+ let storage = ParquetStorage::new(self.catalog.object_store(), StorageId::from("iox"));
// get schema
let table_catalog_schema = self.catalog_schema().await;
@@ -360,9 +365,9 @@ impl TestTable {
.collect();
let schema = table_schema.select_by_names(&selection).unwrap();
- let path: ParquetFilePath = (&file).into();
- let rx = storage
- .read_all(schema.as_arrow(), &path, file.file_size_bytes as usize)
+ let chunk = ParquetChunk::new(Arc::new(file), Arc::new(schema), storage);
+ let rx = chunk
+ .read_filter(&Predicate::default(), Selection::All)
.unwrap();
datafusion::physical_plan::common::collect(rx)
.await
@@ -560,7 +565,10 @@ impl TestPartition {
sort_key: Some(sort_key.clone()),
};
let real_file_size_bytes = create_parquet_file(
- ParquetStorage::new(Arc::clone(&self.catalog.object_store)),
+ ParquetStorage::new(
+ Arc::clone(&self.catalog.object_store),
+ StorageId::from("iox"),
+ ),
&metadata,
record_batch.clone(),
)
diff --git a/ioxd_compactor/src/lib.rs b/ioxd_compactor/src/lib.rs
index 17b2fb7256..0fa1941449 100644
--- a/ioxd_compactor/src/lib.rs
+++ b/ioxd_compactor/src/lib.rs
@@ -19,7 +19,7 @@ use ioxd_common::{
};
use metric::Registry;
use object_store::DynObjectStore;
-use parquet_file::storage::ParquetStorage;
+use parquet_file::storage::{ParquetStorage, StorageId};
use std::{
fmt::{Debug, Display},
sync::Arc,
@@ -193,7 +193,7 @@ pub async fn build_compactor_from_config(
}
txn.commit().await?;
- let parquet_store = ParquetStorage::new(object_store);
+ let parquet_store = ParquetStorage::new(object_store, StorageId::from("iox"));
let CompactorConfig {
max_desired_file_size_bytes,
diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs
index a1606b28ca..c5da5bca8c 100644
--- a/parquet_file/src/storage.rs
+++ b/parquet_file/src/storage.rs
@@ -93,6 +93,16 @@ pub enum ReadError {
MalformedRowCount(#[from] TryFromIntError),
}
+/// ID for an object store hooked up into DataFusion.
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+pub struct StorageId(&'static str);
+
+impl From<&'static str> for StorageId {
+ fn from(id: &'static str) -> Self {
+ Self(id)
+ }
+}
+
/// The [`ParquetStorage`] type encapsulates [`RecordBatch`] persistence to an
/// underlying [`ObjectStore`].
///
@@ -107,13 +117,26 @@ pub enum ReadError {
pub struct ParquetStorage {
/// Underlying object store.
object_store: Arc<DynObjectStore>,
+
+ /// Storage ID to hook it into DataFusion.
+ id: StorageId,
}
impl ParquetStorage {
/// Initialise a new [`ParquetStorage`] using `object_store` as the
/// persistence layer.
- pub fn new(object_store: Arc<DynObjectStore>) -> Self {
- Self { object_store }
+ pub fn new(object_store: Arc<DynObjectStore>, id: StorageId) -> Self {
+ Self { object_store, id }
+ }
+
+ /// Get underlying object store.
+ pub fn object_store(&self) -> &Arc<DynObjectStore> {
+ &self.object_store
+ }
+
+ /// Get ID.
+ pub fn id(&self) -> StorageId {
+ self.id
}
/// Push `batches`, a stream of [`RecordBatch`] instances, to object
@@ -248,22 +271,6 @@ impl ParquetStorage {
futures::stream::once(execute_stream(Arc::new(exec), task_ctx)).try_flatten(),
)))
}
-
- /// Read all data from the parquet file.
- pub fn read_all(
- &self,
- schema: SchemaRef,
- path: &ParquetFilePath,
- file_size: usize,
- ) -> Result<SendableRecordBatchStream, ReadError> {
- self.read_filter(
- &Predicate::default(),
- Selection::All,
- schema,
- path,
- file_size,
- )
- }
}
/// Error during projecting parquet file data to an expected schema.
@@ -298,7 +305,7 @@ mod tests {
async fn test_upload_metadata() {
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let store = ParquetStorage::new(object_store);
+ let store = ParquetStorage::new(object_store, StorageId::from("iox"));
let meta = meta();
let batch = RecordBatch::try_from_iter([("a", to_string_array(&["value"]))]).unwrap();
@@ -443,7 +450,7 @@ mod tests {
async fn test_schema_check_ignore_additional_metadata_in_mem() {
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let store = ParquetStorage::new(object_store);
+ let store = ParquetStorage::new(object_store, StorageId::from("iox"));
let meta = meta();
let batch = RecordBatch::try_from_iter([("a", to_string_array(&["value"]))]).unwrap();
@@ -468,7 +475,7 @@ mod tests {
async fn test_schema_check_ignore_additional_metadata_in_file() {
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let store = ParquetStorage::new(object_store);
+ let store = ParquetStorage::new(object_store, StorageId::from("iox"));
let meta = meta();
let batch = RecordBatch::try_from_iter([("a", to_string_array(&["value"]))]).unwrap();
@@ -599,7 +606,7 @@ mod tests {
) {
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let store = ParquetStorage::new(object_store);
+ let store = ParquetStorage::new(object_store, StorageId::from("iox"));
// Serialize & upload the record batches.
let meta = meta();
@@ -619,7 +626,7 @@ mod tests {
) {
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let store = ParquetStorage::new(object_store);
+ let store = ParquetStorage::new(object_store, StorageId::from("iox"));
let meta = meta();
let (_iox_md, file_size) = upload(&store, &meta, persisted_batch).await;
diff --git a/parquet_file/tests/metadata.rs b/parquet_file/tests/metadata.rs
index 8175250d3b..f4837a3351 100644
--- a/parquet_file/tests/metadata.rs
+++ b/parquet_file/tests/metadata.rs
@@ -13,7 +13,7 @@ use object_store::DynObjectStore;
use parquet_file::{
metadata::IoxMetadata,
serialize::CodecError,
- storage::{ParquetStorage, UploadError},
+ storage::{ParquetStorage, StorageId, UploadError},
};
use schema::{builder::SchemaBuilder, sort::SortKey, InfluxFieldType, TIME_COLUMN_NAME};
@@ -59,7 +59,7 @@ async fn test_decoded_iox_metadata() {
let stream = futures::stream::iter([Ok(batch.clone())]);
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let storage = ParquetStorage::new(object_store);
+ let storage = ParquetStorage::new(object_store, StorageId::from("iox"));
let (iox_parquet_meta, file_size) = storage
.upload(stream, &meta)
@@ -188,7 +188,7 @@ async fn test_empty_parquet_file_panic() {
let stream = futures::stream::iter([Ok(batch.clone())]);
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let storage = ParquetStorage::new(object_store);
+ let storage = ParquetStorage::new(object_store, StorageId::from("iox"));
// Serialising empty data should cause a panic for human investigation.
let err = storage
@@ -270,7 +270,7 @@ async fn test_decoded_many_columns_with_null_cols_iox_metadata() {
let stream = futures::stream::iter([Ok(batch.clone())]);
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let storage = ParquetStorage::new(object_store);
+ let storage = ParquetStorage::new(object_store, StorageId::from("iox"));
let (iox_parquet_meta, file_size) = storage
.upload(stream, &meta)
@@ -355,7 +355,7 @@ async fn test_derive_parquet_file_params() {
let stream = futures::stream::iter([Ok(batch.clone())]);
let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::default());
- let storage = ParquetStorage::new(object_store);
+ let storage = ParquetStorage::new(object_store, StorageId::from("iox"));
let (iox_parquet_meta, file_size) = storage
.upload(stream, &meta)
diff --git a/querier/src/chunk/mod.rs b/querier/src/chunk/mod.rs
index 1793f90786..6d7957b0a7 100644
--- a/querier/src/chunk/mod.rs
+++ b/querier/src/chunk/mod.rs
@@ -7,7 +7,10 @@ use data_types::{
PartitionId, SequenceNumber, ShardId, TableSummary, TimestampMinMax,
};
use iox_catalog::interface::Catalog;
-use parquet_file::{chunk::ParquetChunk, storage::ParquetStorage};
+use parquet_file::{
+ chunk::ParquetChunk,
+ storage::{ParquetStorage, StorageId},
+};
use schema::{sort::SortKey, Schema};
use std::{collections::HashMap, sync::Arc};
use trace::span::{Span, SpanRecorder};
@@ -186,7 +189,10 @@ pub struct ChunkAdapter {
impl ChunkAdapter {
/// Create new adapter with empty cache.
pub fn new(catalog_cache: Arc<CatalogCache>, metric_registry: Arc<metric::Registry>) -> Self {
- let store = ParquetStorage::new(Arc::clone(catalog_cache.object_store().object_store()));
+ let store = ParquetStorage::new(
+ Arc::clone(catalog_cache.object_store().object_store()),
+ StorageId::from("iox"),
+ );
Self {
catalog_cache,
store,
|
871f9b68072bffb58763d666f16e501964136ebc
|
Dom Dwyer
|
2023-08-24 15:38:57
|
sort Cargo.toml dependencies
|
Alphabetically sort the dependencies to avoid diff noise.
| null |
refactor(compactor): sort Cargo.toml dependencies
Alphabetically sort the dependencies to avoid diff noise.
|
diff --git a/compactor/Cargo.toml b/compactor/Cargo.toml
index b0b0ec89d9..938c578d51 100644
--- a/compactor/Cargo.toml
+++ b/compactor/Cargo.toml
@@ -11,8 +11,8 @@ backoff = { path = "../backoff" }
bytes = "1.4"
chrono = { version = "0.4", default-features = false }
compactor_scheduler = { path = "../compactor_scheduler" }
-datafusion = { workspace = true }
data_types = { path = "../data_types" }
+datafusion = { workspace = true }
futures = "0.3"
iox_catalog = { path = "../iox_catalog" }
iox_query = { path = "../iox_query" }
@@ -21,6 +21,7 @@ itertools = "0.11.0"
metric = { path = "../metric" }
object_store = { workspace = true }
observability_deps = { path = "../observability_deps" }
+parking_lot = "0.12.1"
parquet_file = { path = "../parquet_file" }
rand = "0.8.3"
schema = { path = "../schema" }
@@ -30,12 +31,11 @@ trace = { version = "0.1.0", path = "../trace" }
tracker = { path = "../tracker" }
uuid = { version = "1", features = ["v4"] }
workspace-hack = { version = "0.1", path = "../workspace-hack" }
-parking_lot = "0.12.1"
[dev-dependencies]
arrow_util = { path = "../arrow_util" }
assert_matches = "1"
compactor_test_utils = { path = "../compactor_test_utils" }
iox_tests = { path = "../iox_tests" }
-test_helpers = { path = "../test_helpers"}
+test_helpers = { path = "../test_helpers" }
insta = { version = "1.31.0", features = ["yaml"] }
|
d49276a7fb88c53760dc9f4e0f3f93fbfacdeb89
|
Paul Dix
|
2025-01-27 11:26:46
|
Refactor plugins to only require creating trigger (#25914)
| "\n\nThis refactors plugins and triggers so that plugins no longer need to be \"created\". Since plu(...TRUNCATED)
| null | "feat: Refactor plugins to only require creating trigger (#25914)\n\nThis refactors plugins and trig(...TRUNCATED)
| "diff --git a/influxdb3/src/commands/create.rs b/influxdb3/src/commands/create.rs\nindex e432442b76.(...TRUNCATED)
|